Merge "lpm-levels: Do not consider cluster residency in suspend"
diff --git a/Documentation/devicetree/bindings/arm/msm/qmp-debugfs-client.txt b/Documentation/devicetree/bindings/arm/msm/qmp-debugfs-client.txt
new file mode 100644
index 0000000..655bf89
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qmp-debugfs-client.txt
@@ -0,0 +1,17 @@
+QMP debugfs client:
+-------------------
+
+QTI Messaging Protocol (QMP) debugfs client is an interface for clients to
+send data to the Always On Processor (AOP) using QMP.
+
+Required properties :
+- compatible : must be "qcom,debugfs-qmp-client"
+- mboxes : list of QMP mailbox phandle and channel identifier tuples.
+- mbox-names : names of the listed mboxes
+
+Example :
+	qcom,qmp-client {
+		compatible = "qcom,debugfs-qmp-client";
+		mboxes = <&qmp_aop 0>;
+		mbox-names = "aop";
+	};
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index c8dc1f4..09405ee 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -97,6 +97,7 @@
 
 			qcom,boost-threshold-ua = <100000>;
 			qcom,wipower-max-uw = <5000000>;
+			dpdm-supply = <&qusb_phy0>;
 
 			qcom,thermal-mitigation
 					= <3000000 1500000 1000000 500000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
index b790c04..6dd1b749 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
@@ -222,4 +222,27 @@
 		interrupt-controller;
 		#interrupt-cells = <2>;
 	};
+
+	/* ssr - inbound entry from mss */
+	smp2pgpio_ssr_smp2p_1_in: qcom,smp2pgpio-ssr-smp2p-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to mss */
+	smp2pgpio_ssr_smp2p_1_out: qcom,smp2pgpio-ssr-smp2p-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 51eecc6..21c40fc 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -1655,6 +1655,66 @@
 		qcom,pas-id = <0xf>;
 		qcom,firmware-name = "ipa_fws";
 	};
+
+	pil_modem: qcom,mss@4080000 {
+		compatible = "qcom,pil-q6v55-mss";
+		reg = <0x4080000 0x100>,
+		      <0x1f63000 0x008>,
+		      <0x1f65000 0x008>,
+		      <0x1f64000 0x008>,
+		      <0x4180000 0x020>,
+		      <0xc2b0000 0x004>,
+		      <0xb2e0100 0x004>,
+		      <0x4180044 0x004>;
+		reg-names = "qdsp6_base", "halt_q6", "halt_modem",
+			    "halt_nc", "rmb_base", "restart_reg",
+			    "pdc_sync", "alt_reset";
+
+		clocks = <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_MSS_CFG_AHB_CLK>,
+			 <&clock_gcc GCC_MSS_Q6_MEMNOC_AXI_CLK>,
+			 <&clock_gcc GCC_BOOT_ROM_AHB_CLK>,
+			 <&clock_gcc GCC_MSS_GPLL0_DIV_CLK_SRC>,
+			 <&clock_gcc GCC_MSS_SNOC_AXI_CLK>,
+			 <&clock_gcc GCC_MSS_MFAB_AXIS_CLK>,
+			 <&clock_gcc GCC_PRNG_AHB_CLK>;
+		clock-names = "xo", "iface_clk", "bus_clk",
+			      "mem_clk", "gpll0_mss_clk", "snoc_axi_clk",
+			      "mnoc_axi_clk", "prng_clk";
+		qcom,proxy-clock-names = "xo", "prng_clk";
+		qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk",
+					  "gpll0_mss_clk", "snoc_axi_clk",
+					  "mnoc_axi_clk";
+
+		interrupts = <0 266 1>;
+		vdd_cx-supply = <&pm660l_s3_level>;
+		vdd_cx-voltage = <RPMH_REGULATOR_LEVEL_TURBO>;
+		vdd_mx-supply = <&pm660l_s1_level>;
+		vdd_mx-uV = <RPMH_REGULATOR_LEVEL_TURBO>;
+		qcom,firmware-name = "modem";
+		qcom,pil-self-auth;
+		qcom,sysmon-id = <0>;
+		qcom,ssctl-instance-id = <0x12>;
+		qcom,override-acc;
+		qcom,qdsp6v65-1-0;
+		status = "ok";
+		memory-region = <&pil_modem_mem>;
+		qcom,mem-protect-id = <0xF>;
+
+		/* GPIO inputs from mss */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>;
+		qcom,gpio-shutdown-ack = <&smp2pgpio_ssr_smp2p_1_in 7 0>;
+
+		/* GPIO output to mss */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+		qcom,mba-mem@0 {
+			compatible = "qcom,pil-mba-mem";
+			memory-region = <&pil_mba_mem>;
+		};
+	};
 };
 
 #include "sdm670-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index fc80330..3f05846 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -218,7 +218,6 @@
 		clock-names = "iface_clk", "mem_clk", "mem_iface_clk";
 
 		qcom,secure_align_mask = <0xfff>;
-		qcom,global_pt;
 		qcom,hyp_secure_alloc;
 
 		gfx3d_user: gfx3d_user {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index d5488de..ffac298 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -1674,6 +1674,8 @@
 
 		qcom,sdr104-wa;
 
+		qcom,restore-after-cx-collapse;
+
 		qcom,devfreq,freq-table = <50000000 200000000>;
 		clocks = <&clock_gcc GCC_SDCC2_AHB_CLK>,
 			<&clock_gcc GCC_SDCC2_APPS_CLK>;
@@ -2980,6 +2982,7 @@
 		vdd-3.3-ch0-supply = <&pm8998_l25>;
 		qcom,vdd-0.8-cx-mx-config = <800000 800000>;
 		qcom,vdd-3.3-ch0-config = <3104000 3312000>;
+		qcom,smmu-s1-bypass;
 	};
 
 	qmi-tmd-devices {
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 624f069..55687b8 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -208,145 +208,10 @@
 
 #endif
 
-#ifdef CONFIG_SCHED_HMP
-
-static ssize_t show_sched_static_cpu_pwr_cost(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-	ssize_t rc;
-	int cpuid = cpu->dev.id;
-	unsigned int pwr_cost;
-
-	pwr_cost = sched_get_static_cpu_pwr_cost(cpuid);
-
-	rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
-
-	return rc;
-}
-
-static ssize_t __ref store_sched_static_cpu_pwr_cost(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t count)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-	int err;
-	int cpuid = cpu->dev.id;
-	unsigned int pwr_cost;
-
-	err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
-	if (err)
-		return err;
-
-	err = sched_set_static_cpu_pwr_cost(cpuid, pwr_cost);
-
-	if (err >= 0)
-		err = count;
-
-	return err;
-}
-
-static ssize_t show_sched_static_cluster_pwr_cost(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-	ssize_t rc;
-	int cpuid = cpu->dev.id;
-	unsigned int pwr_cost;
-
-	pwr_cost = sched_get_static_cluster_pwr_cost(cpuid);
-
-	rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);
-
-	return rc;
-}
-
-static ssize_t __ref store_sched_static_cluster_pwr_cost(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t count)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-	int err;
-	int cpuid = cpu->dev.id;
-	unsigned int pwr_cost;
-
-	err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
-	if (err)
-		return err;
-
-	err = sched_set_static_cluster_pwr_cost(cpuid, pwr_cost);
-
-	if (err >= 0)
-		err = count;
-
-	return err;
-}
-
-static ssize_t show_sched_cluser_wake_idle(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-	ssize_t rc;
-	int cpuid = cpu->dev.id;
-	unsigned int wake_up_idle;
-
-	wake_up_idle = sched_get_cluster_wake_idle(cpuid);
-
-	rc = scnprintf(buf, PAGE_SIZE-2, "%d\n", wake_up_idle);
-
-	return rc;
-}
-
-static ssize_t __ref store_sched_cluster_wake_idle(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t count)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-	int err;
-	int cpuid = cpu->dev.id;
-	unsigned int wake_up_idle;
-
-	err = kstrtouint(strstrip((char *)buf), 0, &wake_up_idle);
-	if (err)
-		return err;
-
-	err = sched_set_cluster_wake_idle(cpuid, wake_up_idle);
-
-	if (err >= 0)
-		err = count;
-
-	return err;
-}
-
-static DEVICE_ATTR(sched_static_cpu_pwr_cost, 0644,
-					show_sched_static_cpu_pwr_cost,
-					store_sched_static_cpu_pwr_cost);
-static DEVICE_ATTR(sched_static_cluster_pwr_cost, 0644,
-					show_sched_static_cluster_pwr_cost,
-					store_sched_static_cluster_pwr_cost);
-static DEVICE_ATTR(sched_cluster_wake_up_idle, 0644,
-					show_sched_cluser_wake_idle,
-					store_sched_cluster_wake_idle);
-
-static struct attribute *hmp_sched_cpu_attrs[] = {
-	&dev_attr_sched_static_cpu_pwr_cost.attr,
-	&dev_attr_sched_static_cluster_pwr_cost.attr,
-	&dev_attr_sched_cluster_wake_up_idle.attr,
-	NULL
-};
-
-static struct attribute_group sched_hmp_cpu_attr_group = {
-	.attrs = hmp_sched_cpu_attrs,
-};
-
-#endif /* CONFIG_SCHED_HMP */
 static const struct attribute_group *common_cpu_attr_groups[] = {
 #ifdef CONFIG_KEXEC
 	&crash_note_cpu_attr_group,
 #endif
-#ifdef CONFIG_SCHED_HMP
-	&sched_hmp_cpu_attr_group,
-#endif
 #ifdef CONFIG_HOTPLUG_CPU
 	&cpu_isolated_attr_group,
 #endif
@@ -357,9 +222,6 @@
 #ifdef CONFIG_KEXEC
 	&crash_note_cpu_attr_group,
 #endif
-#ifdef CONFIG_SCHED_HMP
-	&sched_hmp_cpu_attr_group,
-#endif
 #ifdef CONFIG_HOTPLUG_CPU
 	&cpu_isolated_attr_group,
 #endif
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index bfc3648..f927756 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -708,6 +708,7 @@
 		} else {
 			BT_PWR_ERR("BT chip state is already :%d no change d\n"
 				, pwr_state);
+			ret = 0;
 		}
 		break;
 	default:
diff --git a/drivers/bluetooth/btfm_slim.h b/drivers/bluetooth/btfm_slim.h
index 161be78..ed3a743 100644
--- a/drivers/bluetooth/btfm_slim.h
+++ b/drivers/bluetooth/btfm_slim.h
@@ -13,7 +13,7 @@
 #define BTFM_SLIM_H
 #include <linux/slimbus/slimbus.h>
 
-#define BTFMSLIM_DBG(fmt, arg...)  pr_debug(fmt "\n", ## arg)
+#define BTFMSLIM_DBG(fmt, arg...)  pr_debug("%s: " fmt "\n", __func__, ## arg)
 #define BTFMSLIM_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
 #define BTFMSLIM_ERR(fmt, arg...)  pr_err("%s: " fmt "\n", __func__, ## arg)
 
@@ -68,6 +68,7 @@
 
 	uint32_t num_rx_port;
 	uint32_t num_tx_port;
+	uint32_t sample_rate;
 
 	struct btfmslim_ch *rx_chs;
 	struct btfmslim_ch *tx_chs;
diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c
index 73a789c..791ea29 100644
--- a/drivers/bluetooth/btfm_slim_codec.c
+++ b/drivers/bluetooth/btfm_slim_codec.c
@@ -54,7 +54,7 @@
 	int ret;
 	struct btfmslim *btfmslim = dai->dev->platform_data;
 
-	BTFMSLIM_DBG("substream = %s  stream = %d dai name = %s",
+	BTFMSLIM_DBG("substream = %s  stream = %d dai->name = %s",
 		 substream->name, substream->stream, dai->name);
 	ret = btfm_slim_hw_init(btfmslim);
 	return ret;
@@ -63,10 +63,52 @@
 static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream,
 		struct snd_soc_dai *dai)
 {
+	int i;
 	struct btfmslim *btfmslim = dai->dev->platform_data;
+	struct btfmslim_ch *ch;
+	uint8_t rxport, grp = false, nchan = 1;
 
-	BTFMSLIM_DBG("substream = %s  stream = %d dai name = %s",
-		 substream->name, substream->stream, dai->name);
+	BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
+		dai->id, dai->rate);
+
+	switch (dai->id) {
+	case BTFM_FM_SLIM_TX:
+		grp = true; nchan = 2;
+		ch = btfmslim->tx_chs;
+		rxport = 0;
+		break;
+	case BTFM_BT_SCO_SLIM_TX:
+		ch = btfmslim->tx_chs;
+		rxport = 0;
+		break;
+	case BTFM_BT_SCO_A2DP_SLIM_RX:
+	case BTFM_BT_SPLIT_A2DP_SLIM_RX:
+		ch = btfmslim->rx_chs;
+		rxport = 1;
+		break;
+	case BTFM_SLIM_NUM_CODEC_DAIS:
+	default:
+		BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
+		return;
+	}
+
+	if (dai->id == BTFM_FM_SLIM_TX)
+		goto out;
+
+	/* Search for dai->id matched port handler */
+	for (i = 0; (i < BTFM_SLIM_NUM_CODEC_DAIS) &&
+		(ch->id != BTFM_SLIM_NUM_CODEC_DAIS) &&
+		(ch->id != dai->id); ch++, i++)
+		;
+
+	if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
+		(ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
+		BTFMSLIM_ERR("ch is invalid!!");
+		return;
+	}
+
+	btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+out:
 	btfm_slim_hw_deinit(btfmslim);
 }
 
@@ -74,14 +116,14 @@
 			    struct snd_pcm_hw_params *params,
 			    struct snd_soc_dai *dai)
 {
-	BTFMSLIM_DBG("dai name = %s DAI-ID %x rate %d num_ch %d",
+	BTFMSLIM_DBG("dai->name = %s DAI-ID %x rate %d num_ch %d",
 		dai->name, dai->id, params_rate(params),
 		params_channels(params));
 
 	return 0;
 }
 
-int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
+static int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
 	struct snd_soc_dai *dai)
 {
 	int i, ret = -EINVAL;
@@ -89,9 +131,12 @@
 	struct btfmslim_ch *ch;
 	uint8_t rxport, grp = false, nchan = 1;
 
-	BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name,
+	BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
 		dai->id, dai->rate);
 
+	/* save sample rate */
+	btfmslim->sample_rate = dai->rate;
+
 	switch (dai->id) {
 	case BTFM_FM_SLIM_TX:
 		grp = true; nchan = 2;
@@ -129,15 +174,15 @@
 	return ret;
 }
 
-int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
+static int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
 	struct snd_soc_dai *dai)
 {
-	int i, ret = -EINVAL;
+	int ret = -EINVAL, i;
 	struct btfmslim *btfmslim = dai->dev->platform_data;
 	struct btfmslim_ch *ch;
 	uint8_t rxport, grp = false, nchan = 1;
 
-	BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name,
+	BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
 		dai->id, dai->rate);
 
 	switch (dai->id) {
@@ -158,7 +203,12 @@
 	case BTFM_SLIM_NUM_CODEC_DAIS:
 	default:
 		BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
-		return ret;
+		goto out;
+	}
+
+	if (dai->id != BTFM_FM_SLIM_TX) {
+		ret = 0;
+		goto out;
 	}
 
 	/* Search for dai->id matched port handler */
@@ -170,9 +220,12 @@
 	if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
 		(ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
 		BTFMSLIM_ERR("ch is invalid!!");
-		return ret;
+		goto out;
 	}
-	ret = btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+
+	btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+
+out:
 	return ret;
 }
 
@@ -282,6 +335,9 @@
 		*tx_num = 0;
 		*rx_num = num;
 		break;
+	default:
+		BTFMSLIM_ERR("Unsupported DAI %d", dai->id);
+		return -EINVAL;
 	}
 
 	do {
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c
index 72e28da..77e2973 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.c
+++ b/drivers/bluetooth/btfm_slim_wcn3990.c
@@ -39,6 +39,7 @@
 {
 	int ret = 0;
 	uint8_t reg_val;
+	uint16_t reg;
 
 	BTFMSLIM_DBG("");
 
@@ -46,20 +47,20 @@
 		return -EINVAL;
 
 	/* Get SB_SLAVE_HW_REV_MSB value*/
-	ret = btfm_slim_read(btfmslim, CHRK_SB_SLAVE_HW_REV_MSB,  1,
-		&reg_val, IFD);
+	reg = CHRK_SB_SLAVE_HW_REV_MSB;
+	ret = btfm_slim_read(btfmslim, reg,  1, &reg_val, IFD);
 	if (ret) {
-		BTFMSLIM_ERR("failed to read (%d)", ret);
+		BTFMSLIM_ERR("failed to read (%d) reg 0x%x", ret, reg);
 		goto error;
 	}
 	BTFMSLIM_DBG("Major Rev: 0x%x, Minor Rev: 0x%x",
 		(reg_val & 0xF0) >> 4, (reg_val & 0x0F));
 
 	/* Get SB_SLAVE_HW_REV_LSB value*/
-	ret = btfm_slim_read(btfmslim, CHRK_SB_SLAVE_HW_REV_LSB,  1,
-		&reg_val, IFD);
+	reg = CHRK_SB_SLAVE_HW_REV_LSB;
+	ret = btfm_slim_read(btfmslim, reg,  1, &reg_val, IFD);
 	if (ret) {
-		BTFMSLIM_ERR("failed to read (%d)", ret);
+		BTFMSLIM_ERR("failed to read (%d) reg 0x%x", ret, reg);
 		goto error;
 	}
 	BTFMSLIM_DBG("Step Rev: 0x%x", reg_val);
@@ -68,62 +69,87 @@
 	return ret;
 }
 
+static inline int is_fm_port(uint8_t port_num)
+{
+	if (port_num == CHRK_SB_PGD_PORT_TX1_FM ||
+		port_num == CHRK_SB_PGD_PORT_TX2_FM)
+		return 1;
+	else
+		return 0;
+}
 
 int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
 	uint8_t rxport, uint8_t enable)
 {
 	int ret = 0;
 	uint8_t reg_val = 0;
+	uint8_t port_bit = 0;
 	uint16_t reg;
 
 	BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
 	if (rxport) {
-		/* Port enable */
-		reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
-	} else { /* txport */
-		/* Multiple Channel Setting - only FM Tx will be multiple
-		 * channel
-		 */
-		if (enable && (port_num == CHRK_SB_PGD_PORT_TX1_FM ||
-			port_num == CHRK_SB_PGD_PORT_TX2_FM)) {
-
-			reg_val = (0x1 << CHRK_SB_PGD_PORT_TX1_FM) |
-				(0x1 << CHRK_SB_PGD_PORT_TX2_FM);
-			reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num);
+		if (enable && btfmslim->sample_rate == 48000) {
+			/* For A2DP Rx */
+			reg_val = 0x1;
+			port_bit = port_num - 0x10;
+			reg = CHRK_SB_PGD_RX_PORTn_MULTI_CHNL_0(port_bit);
+			BTFMSLIM_DBG("writing reg_val (%d) to reg(%x) for A2DP",
+					reg_val, reg);
 			ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
 			if (ret) {
-				BTFMSLIM_ERR("failed to write (%d)", ret);
+				BTFMSLIM_ERR("failed to write (%d) reg 0x%x",
+						ret, reg);
 				goto error;
 			}
 		}
+		/* Port enable */
+		reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
+		goto enable_disable_rxport;
+	}
+	if (!enable)
+		goto enable_disable_txport;
 
-		/* Enable Tx port hw auto recovery for underrun or
-		 * overrun error
-		 */
-		reg_val = (enable) ? (CHRK_ENABLE_OVERRUN_AUTO_RECOVERY |
-				CHRK_ENABLE_UNDERRUN_AUTO_RECOVERY) : 0x0;
-
-		ret = btfm_slim_write(btfmslim,
-			CHRK_SB_PGD_PORT_TX_OR_UR_CFGN(port_num), 1,
-			&reg_val, IFD);
+	/* txport */
+	/* Multiple Channel Setting */
+	if (is_fm_port(port_num)) {
+		reg_val = (0x1 << CHRK_SB_PGD_PORT_TX1_FM) |
+				(0x1 << CHRK_SB_PGD_PORT_TX2_FM);
+		reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num);
+		ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
 		if (ret) {
-			BTFMSLIM_ERR("failed to write (%d)", ret);
+			BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
 			goto error;
 		}
-
-		/* Port enable */
-		reg = CHRK_SB_PGD_PORT_TX_CFGN(port_num);
 	}
 
-	if (enable)
-		/* Set water mark to 1 and enable the port */
-		reg_val = CHRK_SB_PGD_PORT_ENABLE | CHRK_SB_PGD_PORT_WM_LB;
-	else
+	/* Enable Tx port hw auto recovery for underrun or overrun error */
+	reg_val = (CHRK_ENABLE_OVERRUN_AUTO_RECOVERY |
+				CHRK_ENABLE_UNDERRUN_AUTO_RECOVERY);
+	reg = CHRK_SB_PGD_PORT_TX_OR_UR_CFGN(port_num);
+	ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
+	if (ret) {
+		BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
+		goto error;
+	}
+
+enable_disable_txport:
+	/* Port enable */
+	reg = CHRK_SB_PGD_PORT_TX_CFGN(port_num);
+
+enable_disable_rxport:
+	if (enable) {
+		if (is_fm_port(port_num))
+			reg_val = CHRK_SB_PGD_PORT_ENABLE |
+					CHRK_SB_PGD_PORT_WM_L3;
+		else
+			reg_val = CHRK_SB_PGD_PORT_ENABLE |
+					CHRK_SB_PGD_PORT_WM_LB;
+	} else
 		reg_val = CHRK_SB_PGD_PORT_DISABLE;
 
 	ret = btfm_slim_write(btfmslim, reg, 1, &reg_val, IFD);
 	if (ret)
-		BTFMSLIM_ERR("failed to write (%d)", ret);
+		BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg);
 
 error:
 	return ret;
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 13fac71..7bc263c 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -211,6 +211,7 @@
 	struct device *dev;
 	struct fastrpc_session_ctx session[NUM_SESSIONS];
 	struct completion work;
+	struct completion workport;
 	struct notifier_block nb;
 	struct kref kref;
 	int sesscount;
@@ -1378,6 +1379,7 @@
 	me->channel = &gcinfo[0];
 	for (i = 0; i < NUM_CHANNELS; i++) {
 		init_completion(&me->channel[i].work);
+		init_completion(&me->channel[i].workport);
 		me->channel[i].sesscount = 0;
 	}
 }
@@ -1828,7 +1830,7 @@
 	switch (event) {
 	case GLINK_CONNECTED:
 		link->port_state = FASTRPC_LINK_CONNECTED;
-		complete(&me->channel[cid].work);
+		complete(&me->channel[cid].workport);
 		break;
 	case GLINK_LOCAL_DISCONNECTED:
 		link->port_state = FASTRPC_LINK_DISCONNECTED;
@@ -1978,8 +1980,7 @@
 		return;
 	link = &gfa.channel[cid].link;
 
-	if (link->port_state == FASTRPC_LINK_CONNECTED ||
-		link->port_state == FASTRPC_LINK_CONNECTING) {
+	if (link->port_state == FASTRPC_LINK_CONNECTED) {
 		link->port_state = FASTRPC_LINK_DISCONNECTING;
 		glink_close(chan);
 	}
@@ -2161,7 +2162,8 @@
 		if (err)
 			goto bail;
 
-		VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
+		VERIFY(err,
+			 wait_for_completion_timeout(&me->channel[cid].workport,
 						RPC_TIMEOUT));
 		if (err) {
 			me->channel[cid].chan = 0;
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 12eb6d8..a6edf2f 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -699,7 +699,8 @@
 	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
 	cpumask_set_cpu(max_cpu, &speedchange_cpumask);
 	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
-	wake_up_process_no_notif(speedchange_task);
+
+	wake_up_process(speedchange_task);
 
 rearm:
 	cpufreq_interactive_timer_resched(data, false);
@@ -814,7 +815,7 @@
 	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
 
 	if (anyboost)
-		wake_up_process_no_notif(speedchange_task);
+		wake_up_process(speedchange_task);
 }
 
 static int load_change_callback(struct notifier_block *nb, unsigned long val,
@@ -1926,7 +1927,7 @@
 	get_task_struct(speedchange_task);
 
 	/* NB: wake up so the thread does not look hung to the freezer */
-	wake_up_process_no_notif(speedchange_task);
+	wake_up_process(speedchange_task);
 
 	return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
 }
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index b058bdd..58448ca 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -55,6 +55,12 @@
 #define MMSS_VBIF_TEST_BUS_OUT_CTRL	0x210
 #define MMSS_VBIF_TEST_BUS_OUT		0x230
 
+/* Vbif error info */
+#define MMSS_VBIF_XIN_HALT_CTRL1	0x204
+#define MMSS_VBIF_ERR_INFO		0X1a0
+#define MMSS_VBIF_ERR_INFO_1		0x1a4
+#define MMSS_VBIF_CLIENT_NUM		14
+
 /* print debug ranges in groups of 4 u32s */
 #define REG_DUMP_ALIGN		16
 
@@ -2366,7 +2372,8 @@
 	bool in_log, in_mem;
 	u32 **dump_mem = NULL;
 	u32 *dump_addr = NULL;
-	u32 value;
+	u32 value, d0, d1;
+	unsigned long reg;
 	struct vbif_debug_bus_entry *head;
 	phys_addr_t phys = 0;
 	int i, list_size = 0;
@@ -2439,6 +2446,30 @@
 	/* make sure that vbif core is on */
 	wmb();
 
+	/**
+	 * Extract VBIF error info based on XIN halt status.
+	 * If the XIN client is not in HALT state, then retrieve the
+	 * VBIF error info for it.
+	 */
+	reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
+	dev_err(sde_dbg_base.dev, "XIN HALT:0x%lX\n", reg);
+	reg >>= 16;
+	for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
+		if (!test_bit(0, &reg)) {
+			writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
+			/* make sure reg write goes through */
+			wmb();
+
+			d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
+			d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
+
+			dev_err(sde_dbg_base.dev,
+					"Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
+					i, d0, d1);
+		}
+		reg >>= 1;
+	}
+
 	for (i = 0; i < bus_size; i++) {
 		head = dbg_bus + i;
 
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3e9448b..0bf89b4 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -495,11 +495,6 @@
 	if (!clk_scaling->enable)
 		goto out;
 
-	if (*freq == UINT_MAX)
-		*freq = clk_scaling->freq_table[1];
-	else
-		*freq = clk_scaling->freq_table[0];
-
 	pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
 		*freq, current->comm);
 
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 8641a45..486e8c3 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -41,6 +41,7 @@
 #define ICL_CHANGE_VOTER		"ICL_CHANGE_VOTER"
 #define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
 #define USBIN_I_VOTER			"USBIN_I_VOTER"
+#define FCC_CHANGE_VOTER		"FCC_CHANGE_VOTER"
 
 struct pl_data {
 	int			pl_mode;
@@ -288,69 +289,11 @@
 	__ATTR_NULL,
 };
 
-/***********
- *  TAPER  *
- ************/
-#define MINIMUM_PARALLEL_FCC_UA		500000
-#define PL_TAPER_WORK_DELAY_MS		100
-#define TAPER_RESIDUAL_PCT		75
-static void pl_taper_work(struct work_struct *work)
-{
-	struct pl_data *chip = container_of(work, struct pl_data,
-						pl_taper_work.work);
-	union power_supply_propval pval = {0, };
-	int rc;
-
-	/* exit immediately if parallel is disabled */
-	if (get_effective_result(chip->pl_disable_votable)) {
-		pl_dbg(chip, PR_PARALLEL, "terminating parallel not in progress\n");
-		goto done;
-	}
-
-	pl_dbg(chip, PR_PARALLEL, "entering parallel taper work slave_fcc = %d\n",
-			chip->slave_fcc_ua);
-	if (chip->slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
-		pl_dbg(chip, PR_PARALLEL, "terminating parallel's share lower than 500mA\n");
-		vote(chip->pl_disable_votable, TAPER_END_VOTER, true, 0);
-		goto done;
-	}
-
-	rc = power_supply_get_property(chip->batt_psy,
-			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
-	if (rc < 0) {
-		pr_err("Couldn't get batt charge type rc=%d\n", rc);
-		goto done;
-	}
-
-	chip->charge_type = pval.intval;
-	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
-		pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
-
-		vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
-		/* Reduce the taper percent by 25 percent */
-		chip->taper_pct = chip->taper_pct * TAPER_RESIDUAL_PCT / 100;
-		rerun_election(chip->fcc_votable);
-		pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work after %d ms\n",
-				PL_TAPER_WORK_DELAY_MS);
-		schedule_delayed_work(&chip->pl_taper_work,
-				msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
-		return;
-	}
-
-	/*
-	 * Master back to Fast Charge, get out of this round of taper reduction
-	 */
-	pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
-
-done:
-	vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
-}
-
 /*********
  *  FCC  *
  **********/
 #define EFFICIENCY_PCT	80
-static void split_fcc(struct pl_data *chip, int total_ua,
+static void get_fcc_split(struct pl_data *chip, int total_ua,
 			int *master_ua, int *slave_ua)
 {
 	int rc, effective_total_ua, slave_limited_ua, hw_cc_delta_ua = 0,
@@ -389,7 +332,7 @@
 	effective_total_ua = max(0, total_ua + hw_cc_delta_ua);
 	slave_limited_ua = min(effective_total_ua, bcl_ua);
 	*slave_ua = (slave_limited_ua * chip->slave_pct) / 100;
-	*slave_ua = (*slave_ua * chip->taper_pct) / 100;
+
 	/*
 	 * In USBIN_USBIN configuration with internal rsense parallel
 	 * charger's current goes through main charger's BATFET, keep
@@ -399,14 +342,75 @@
 		*master_ua = max(0, total_ua);
 	else
 		*master_ua = max(0, total_ua - *slave_ua);
+
+	/* further reduce slave's share in accordance with taper reductions */
+	*slave_ua = (*slave_ua * chip->taper_pct) / 100;
+}
+
+#define MINIMUM_PARALLEL_FCC_UA		500000
+#define PL_TAPER_WORK_DELAY_MS		100
+#define TAPER_RESIDUAL_PCT		90
+static void pl_taper_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work, struct pl_data,
+						pl_taper_work.work);
+	union power_supply_propval pval = {0, };
+	int total_fcc_ua, master_fcc_ua, slave_fcc_ua;
+	int rc;
+
+	/* exit immediately if parallel is disabled */
+	if (get_effective_result(chip->pl_disable_votable)) {
+		pl_dbg(chip, PR_PARALLEL, "terminating parallel not in progress\n");
+		goto done;
+	}
+
+	total_fcc_ua = get_effective_result_locked(chip->fcc_votable);
+	get_fcc_split(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
+	if (slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
+		pl_dbg(chip, PR_PARALLEL, "terminating parallel's share lower than 500mA\n");
+		vote(chip->pl_disable_votable, TAPER_END_VOTER, true, 0);
+		goto done;
+	}
+
+	pl_dbg(chip, PR_PARALLEL, "entering parallel taper work slave_fcc = %d\n",
+		slave_fcc_ua);
+
+	rc = power_supply_get_property(chip->batt_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		goto done;
+	}
+
+	chip->charge_type = pval.intval;
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+		pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
+
+		vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
+		/* Reduce the taper percent by 10 percent */
+		chip->taper_pct = chip->taper_pct * TAPER_RESIDUAL_PCT / 100;
+		rerun_election(chip->fcc_votable);
+		pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work after %d ms\n",
+				PL_TAPER_WORK_DELAY_MS);
+		schedule_delayed_work(&chip->pl_taper_work,
+				msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
+		return;
+	}
+
+	/*
+	 * Master back to Fast Charge, get out of this round of taper reduction
+	 */
+	pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
+
+done:
+	vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
 }
 
 static int pl_fcc_vote_callback(struct votable *votable, void *data,
 			int total_fcc_ua, const char *client)
 {
 	struct pl_data *chip = data;
-	union power_supply_propval pval = {0, };
-	int rc, master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
+	int master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
 
 	if (total_fcc_ua < 0)
 		return 0;
@@ -414,41 +418,23 @@
 	if (!chip->main_psy)
 		return 0;
 
-	if (chip->pl_mode == POWER_SUPPLY_PL_NONE
-	    || get_effective_result_locked(chip->pl_disable_votable)) {
-		pval.intval = total_fcc_ua;
-		rc = power_supply_set_property(chip->main_psy,
-				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
-				&pval);
-		if (rc < 0)
-			pr_err("Couldn't set main fcc, rc=%d\n", rc);
-		return rc;
-	}
-
 	if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
-		split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
+		get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
+				&slave_fcc_ua);
 
-		pval.intval = slave_fcc_ua;
-		rc = power_supply_set_property(chip->pl_psy,
-				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
-				&pval);
-		if (rc < 0) {
-			pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
-			return rc;
-		}
-
-		chip->slave_fcc_ua = slave_fcc_ua;
-
-		pval.intval = master_fcc_ua;
-		rc = power_supply_set_property(chip->main_psy,
-				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
-				&pval);
-		if (rc < 0) {
-			pr_err("Could not set main fcc, rc=%d\n", rc);
-			return rc;
+		if (slave_fcc_ua > 500000) {
+			chip->slave_fcc_ua = slave_fcc_ua;
+			vote(chip->pl_disable_votable, FCC_CHANGE_VOTER,
+							false, 0);
+		} else {
+			chip->slave_fcc_ua = 0;
+			vote(chip->pl_disable_votable, FCC_CHANGE_VOTER,
+							true, 0);
 		}
 	}
 
+	rerun_election(chip->pl_disable_votable);
+
 	pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
 		   master_fcc_ua, slave_fcc_ua,
 		   (master_fcc_ua * 100) / total_fcc_ua,
@@ -577,18 +563,34 @@
 		vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
 }
 
+static bool is_main_available(struct pl_data *chip)
+{
+	if (chip->main_psy)
+		return true;
+
+	chip->main_psy = power_supply_get_by_name("main");
+
+	return !!chip->main_psy;
+}
+
 static int pl_disable_vote_callback(struct votable *votable,
 		void *data, int pl_disable, const char *client)
 {
 	struct pl_data *chip = data;
 	union power_supply_propval pval = {0, };
+	int master_fcc_ua, total_fcc_ua, slave_fcc_ua;
 	int rc;
 
-	chip->taper_pct = 100;
 	chip->total_settled_ua = 0;
 	chip->pl_settled_ua = 0;
 
-	if (!pl_disable) { /* enable */
+	if (!is_main_available(chip))
+		return -ENODEV;
+
+	total_fcc_ua = get_effective_result_locked(chip->fcc_votable);
+
+	if (chip->pl_mode != POWER_SUPPLY_PL_NONE && !pl_disable) {
+		 /* enable parallel charging */
 		rc = power_supply_get_property(chip->pl_psy,
 				POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
 		if (rc == -ENODEV) {
@@ -602,7 +604,30 @@
 		}
 
 		rerun_election(chip->fv_votable);
-		rerun_election(chip->fcc_votable);
+
+		get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
+				&slave_fcc_ua);
+
+		chip->slave_fcc_ua = slave_fcc_ua;
+
+		pval.intval = master_fcc_ua;
+		rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+		if (rc < 0) {
+			pr_err("Could not set main fcc, rc=%d\n", rc);
+			return rc;
+		}
+
+		pval.intval = slave_fcc_ua;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+		if (rc < 0) {
+			pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
+			return rc;
+		}
+
 		/*
 		 * Enable will be called with a valid pl_psy always. The
 		 * PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy
@@ -647,7 +672,17 @@
 				pr_err("Couldn't change slave suspend state rc=%d\n",
 					rc);
 		}
-		rerun_election(chip->fcc_votable);
+
+		/* main psy gets all share */
+		pval.intval = total_fcc_ua;
+		rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+		if (rc < 0) {
+			pr_err("Could not set main fcc, rc=%d\n", rc);
+			return rc;
+		}
+
 		rerun_election(chip->fv_votable);
 	}
 
@@ -681,16 +716,6 @@
 	return 0;
 }
 
-static bool is_main_available(struct pl_data *chip)
-{
-	if (chip->main_psy)
-		return true;
-
-	chip->main_psy = power_supply_get_by_name("main");
-
-	return !!chip->main_psy;
-}
-
 static bool is_batt_available(struct pl_data *chip)
 {
 	if (!chip->batt_psy)
@@ -835,6 +860,7 @@
 	else
 		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
 
+	rerun_election(chip->fcc_votable);
 
 	if (get_effective_result(chip->pl_disable_votable))
 		return;
@@ -856,8 +882,6 @@
 		if (abs(new_total_settled_ua - chip->total_settled_ua)
 						> MIN_ICL_CHANGE_DELTA_UA)
 			split_settled(chip);
-	} else {
-		rerun_election(chip->fcc_votable);
 	}
 }
 
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index ae44f01..3f26e5e 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -3705,6 +3705,7 @@
 
 	/* reset parallel voters */
 	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+	vote(chg->pl_disable_votable, FCC_CHANGE_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
 	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 5c7819e..c08d404 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -66,6 +66,7 @@
 #define USBIN_I_VOTER			"USBIN_I_VOTER"
 #define WEAK_CHARGER_VOTER		"WEAK_CHARGER_VOTER"
 #define OTG_VOTER			"OTG_VOTER"
+#define FCC_CHANGE_VOTER		"FCC_CHANGE_VOTER"
 
 #define VCONN_MAX_ATTEMPTS	3
 #define OTG_MAX_ATTEMPTS	3
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index a2b5ea0..370e092 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7833,6 +7833,11 @@
 		ufshcd_set_auto_hibern8_timer(hba,
 				      hba->hibern8_on_idle.delay_ms);
 out:
+	if (ret) {
+		ufshcd_set_ufs_dev_poweroff(hba);
+		ufshcd_set_link_off(hba);
+	}
+
 	/*
 	 * If we failed to initialize the device or the device is not
 	 * present, turn off the power/clocks etc.
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 1f7286b..5be06ba 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -743,4 +743,12 @@
 	  kernel panic. On certain MSM SoCs, this provides us
 	  additional debugging information.
 
+config QMP_DEBUGFS_CLIENT
+	bool "Debugfs Client to communicate with AOP using QMP protocol"
+	depends on DEBUG_FS
+	default n
+	help
+	  This option enables a driver that allows clients to send messages
+	  to the Always On Processor (AOP) using the QMP transport.
+
 source "drivers/soc/qcom/memshare/Kconfig"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 7820f8b..9736cdc 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -78,3 +78,4 @@
 obj-$(CONFIG_QCOM_DCC_V2) += dcc_v2.o
 obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_stats.o
 obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
+obj-$(CONFIG_QMP_DEBUGFS_CLIENT) += qmp-debugfs-client.o
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 405d05c..a8dc044 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -269,6 +269,7 @@
 	ICNSS_WLFW_EXISTS,
 	ICNSS_WDOG_BITE,
 	ICNSS_SHUTDOWN_DONE,
+	ICNSS_HOST_TRIGGERED_PDR,
 };
 
 struct ce_irq_list {
@@ -321,6 +322,13 @@
 		uint32_t disable;
 	} ce_irqs[ICNSS_MAX_IRQ_REGISTRATIONS];
 
+	struct {
+		uint32_t pdr_fw_crash;
+		uint32_t pdr_host_error;
+		uint32_t root_pd_crash;
+		uint32_t root_pd_shutdown;
+	} recovery;
+
 	uint32_t pm_suspend;
 	uint32_t pm_suspend_err;
 	uint32_t pm_resume;
@@ -362,7 +370,6 @@
 	uint32_t rejuvenate_ack_req;
 	uint32_t rejuvenate_ack_resp;
 	uint32_t rejuvenate_ack_err;
-	uint32_t trigger_recovery;
 };
 
 #define MAX_NO_OF_MAC_ADDR 4
@@ -371,6 +378,20 @@
 	uint32_t no_of_mac_addr_set;
 };
 
+enum icnss_pdr_cause_index {
+	ICNSS_FW_CRASH,
+	ICNSS_ROOT_PD_CRASH,
+	ICNSS_ROOT_PD_SHUTDOWN,
+	ICNSS_HOST_ERROR,
+};
+
+static const char * const icnss_pdr_cause[] = {
+	[ICNSS_FW_CRASH] = "FW crash",
+	[ICNSS_ROOT_PD_CRASH] = "Root PD crashed",
+	[ICNSS_ROOT_PD_SHUTDOWN] = "Root PD shutdown",
+	[ICNSS_HOST_ERROR] = "Host error",
+};
+
 struct service_notifier_context {
 	void *handle;
 	uint32_t instance_id;
@@ -440,6 +461,7 @@
 	u8 requesting_sub_system;
 	u16 line_number;
 	char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+	struct mutex dev_lock;
 } *penv;
 
 #ifdef CONFIG_ICNSS_DEBUG
@@ -2460,6 +2482,11 @@
 	icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
 		      priv->state, notif->crashed);
 
+	if (notif->crashed)
+		priv->stats.recovery.root_pd_crash++;
+	else
+		priv->stats.recovery.root_pd_shutdown++;
+
 	icnss_ignore_qmi_timeout(true);
 
 	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
@@ -2539,6 +2566,7 @@
 	enum pd_subsys_state *state = data;
 	struct icnss_event_pd_service_down_data *event_data;
 	struct icnss_uevent_fw_down_data fw_down_data;
+	enum icnss_pdr_cause_index cause = ICNSS_ROOT_PD_CRASH;
 
 	icnss_pr_dbg("PD service notification: 0x%lx state: 0x%lx\n",
 		     notification, priv->state);
@@ -2553,26 +2581,40 @@
 
 	if (state == NULL) {
 		event_data->crashed = true;
+		priv->stats.recovery.root_pd_crash++;
 		goto event_post;
 	}
 
-	icnss_pr_info("PD service down, pd_state: %d, state: 0x%lx\n",
-		      *state, priv->state);
-
 	switch (*state) {
 	case ROOT_PD_WDOG_BITE:
 		event_data->crashed = true;
 		event_data->wdog_bite = true;
+		priv->stats.recovery.root_pd_crash++;
 		break;
 	case ROOT_PD_SHUTDOWN:
+		cause = ICNSS_ROOT_PD_SHUTDOWN;
+		priv->stats.recovery.root_pd_shutdown++;
+		break;
+	case USER_PD_STATE_CHANGE:
+		if (test_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state)) {
+			cause = ICNSS_HOST_ERROR;
+			priv->stats.recovery.pdr_host_error++;
+		} else {
+			cause = ICNSS_FW_CRASH;
+			priv->stats.recovery.pdr_fw_crash++;
+		}
 		break;
 	default:
 		event_data->crashed = true;
+		priv->stats.recovery.root_pd_crash++;
 		break;
 	}
 
+	icnss_pr_info("PD service down, pd_state: %d, state: 0x%lx: cause: %s\n",
+		      *state, priv->state, icnss_pdr_cause[cause]);
 event_post:
 	icnss_ignore_qmi_timeout(true);
+	clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
 
 	fw_down_data.crashed = event_data->crashed;
 	icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
@@ -3315,7 +3357,6 @@
 	WARN_ON(1);
 	icnss_pr_warn("Initiate PD restart at WLAN FW, state: 0x%lx\n",
 		      priv->state);
-	priv->stats.trigger_recovery++;
 
 	/*
 	 * Initiate PDR, required only for the first instance
@@ -3323,6 +3364,9 @@
 	ret = service_notif_pd_restart(priv->service_notifier[0].name,
 		priv->service_notifier[0].instance_id);
 
+	if (!ret)
+		set_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
+
 out:
 	return ret;
 }
@@ -3770,6 +3814,9 @@
 		case ICNSS_SHUTDOWN_DONE:
 			seq_puts(s, "SHUTDOWN DONE");
 			continue;
+		case ICNSS_HOST_TRIGGERED_PDR:
+			seq_puts(s, "HOST TRIGGERED PDR");
+			continue;
 		}
 
 		seq_printf(s, "UNKNOWN-%d", i);
@@ -3889,7 +3936,10 @@
 	ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_req);
 	ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_resp);
 	ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_err);
-	ICNSS_STATS_DUMP(s, priv, trigger_recovery);
+	ICNSS_STATS_DUMP(s, priv, recovery.pdr_fw_crash);
+	ICNSS_STATS_DUMP(s, priv, recovery.pdr_host_error);
+	ICNSS_STATS_DUMP(s, priv, recovery.root_pd_crash);
+	ICNSS_STATS_DUMP(s, priv, recovery.root_pd_shutdown);
 
 	seq_puts(s, "\n<------------------ PM stats ------------------->\n");
 	ICNSS_STATS_DUMP(s, priv, pm_suspend);
@@ -4020,12 +4070,14 @@
 {
 	struct icnss_priv *priv = s->private;
 
+	mutex_lock(&priv->dev_lock);
 	if (!priv->diag_reg_read_buf) {
 		seq_puts(s, "Usage: echo <mem_type> <offset> <data_len> > <debugfs>/icnss/reg_read\n");
 
 		if (!test_bit(ICNSS_FW_READY, &priv->state))
 			seq_puts(s, "Firmware is not ready yet!, wait for FW READY\n");
 
+		mutex_unlock(&priv->dev_lock);
 		return 0;
 	}
 
@@ -4039,6 +4091,7 @@
 	priv->diag_reg_read_len = 0;
 	kfree(priv->diag_reg_read_buf);
 	priv->diag_reg_read_buf = NULL;
+	mutex_unlock(&priv->dev_lock);
 
 	return 0;
 }
@@ -4099,18 +4152,22 @@
 	    data_len > QMI_WLFW_MAX_DATA_SIZE_V01)
 		return -EINVAL;
 
+	mutex_lock(&priv->dev_lock);
 	kfree(priv->diag_reg_read_buf);
 	priv->diag_reg_read_buf = NULL;
 
 	reg_buf = kzalloc(data_len, GFP_KERNEL);
-	if (!reg_buf)
+	if (!reg_buf) {
+		mutex_unlock(&priv->dev_lock);
 		return -ENOMEM;
+	}
 
 	ret = wlfw_athdiag_read_send_sync_msg(priv, reg_offset,
 					      mem_type, data_len,
 					      reg_buf);
 	if (ret) {
 		kfree(reg_buf);
+		mutex_unlock(&priv->dev_lock);
 		return ret;
 	}
 
@@ -4118,6 +4175,7 @@
 	priv->diag_reg_read_mem_type = mem_type;
 	priv->diag_reg_read_len = data_len;
 	priv->diag_reg_read_buf = reg_buf;
+	mutex_unlock(&priv->dev_lock);
 
 	return count;
 }
@@ -4397,6 +4455,7 @@
 
 	spin_lock_init(&priv->event_lock);
 	spin_lock_init(&priv->on_off_lock);
+	mutex_init(&priv->dev_lock);
 
 	priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
 	if (!priv->event_wq) {
diff --git a/drivers/soc/qcom/qmp-debugfs-client.c b/drivers/soc/qcom/qmp-debugfs-client.c
new file mode 100644
index 0000000..578e7f0
--- /dev/null
+++ b/drivers/soc/qcom/qmp-debugfs-client.c
@@ -0,0 +1,117 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mailbox_client.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox/qmp.h>
+#include <linux/uaccess.h>
+
+#define MAX_MSG_SIZE 96 /* Imposed by the remote */
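+
+/*
+ * Usage sketch (assumes debugfs is mounted at /sys/kernel/debug; the node
+ * below is created at the debugfs root, write-only):
+ *   echo "<aop message>" > /sys/kernel/debug/aop_send_message
+ * The message format itself is defined by the AOP firmware.
+ */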
+
+static struct mbox_chan *chan;
+static struct mbox_client *cl;
+
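+/*
+ * Write handler for the "aop_send_message" debugfs node: copy the user
+ * message (at most MAX_MSG_SIZE bytes), round the length up to the 4-byte
+ * alignment the mailbox controller expects, and send it to the AOP over QMP.
+ */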
+static ssize_t aop_msg_write(struct file *file, const char __user *userstr,
+		size_t len, loff_t *pos)
+{
+	char buf[MAX_MSG_SIZE + 1] = {0};
+	struct qmp_pkt pkt;
+	int rc;
+
+	if (!len || (len > MAX_MSG_SIZE))
+		return len;
+
+	rc  = copy_from_user(buf, userstr, len);
+	if (rc) {
+		pr_err("%s copy from user failed, rc=%d\n", __func__, rc);
+		return len;
+	}
+
+	/*
+	 * Controller expects a 4 byte aligned buffer
+	 */
+	pkt.size = (len + 0x3) & ~0x3;
+	pkt.data = buf;
+
+	if (mbox_send_message(chan, &pkt) < 0)
+		pr_err("Failed to send qmp request\n");
+
+	return len;
+}
+
+static const struct file_operations aop_msg_fops = {
+	.write = aop_msg_write,
+};
+
+static int qmp_msg_probe(struct platform_device *pdev)
+{
+	struct dentry *file;
+
+	cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
+	if (!cl)
+		return -ENOMEM;
+
+	cl->dev = &pdev->dev;
+	cl->tx_block = true;
+	cl->tx_tout = 100;
+	cl->knows_txdone = false;
+
+	chan = mbox_request_channel(cl, 0);
+	if (IS_ERR(chan)) {
+		dev_err(&pdev->dev, "Failed to get mbox channel\n");
+		return PTR_ERR(chan);
+	}
+
+	file = debugfs_create_file("aop_send_message", 0220, NULL, NULL,
+			&aop_msg_fops);
+	if (!file)
+		goto err;
+	return 0;
+err:
+	mbox_free_channel(chan);
+	chan = NULL;
+	return -ENOMEM;
+}
+
+static const struct of_device_id aop_qmp_match_tbl[] = {
+	{.compatible = "qcom,debugfs-qmp-client"},
+	{},
+};
+
+static struct platform_driver aop_qmp_msg_driver = {
+	.probe = qmp_msg_probe,
+	.driver = {
+		.name = "debugfs-qmp-client",
+		.owner = THIS_MODULE,
+		.of_match_table = aop_qmp_match_tbl,
+	},
+};
+
+builtin_platform_driver(aop_qmp_msg_driver);
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index ba92ed9..aa2d2d7 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -293,6 +293,7 @@
 
 int ion_heap_is_system_secure_heap_type(enum ion_heap_type type);
 int get_secure_vmid(unsigned long flags);
+int get_vmid(unsigned long flags);
 bool is_secure_vmid_valid(int vmid);
 unsigned int count_set_bits(unsigned long val);
 int populate_vm_list(unsigned long flags, unsigned int *vm_list, int nelems);
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index a9ebfed..c7b58ce 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -638,7 +638,7 @@
 
 	flags = flags & ION_FLAGS_CP_MASK;
 	for_each_set_bit(itr, &flags, BITS_PER_LONG) {
-		vmid = get_secure_vmid(0x1UL << itr);
+		vmid = get_vmid(0x1UL << itr);
 		if (vmid < 0 || !nelems)
 			return -EINVAL;
 
@@ -674,6 +674,19 @@
 		return VMID_CP_SPSS_HLOS_SHARED;
 	return -EINVAL;
 }
+
+int get_vmid(unsigned long flags)
+{
+	int vmid;
+
+	vmid = get_secure_vmid(flags);
+	if (vmid < 0) {
+		if (flags & ION_FLAG_CP_HLOS)
+			vmid = VMID_HLOS;
+	}
+	return vmid;
+}
+
 /* fix up the cases where the ioctl direction bits are incorrect */
 static unsigned int msm_ion_ioctl_dir(unsigned int cmd)
 {
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 46df732..a7cb586 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -346,6 +346,7 @@
 	struct acc_dev	*dev = ep->driver_data;
 	char *string_dest = NULL;
 	int length = req->actual;
+	unsigned long flags;
 
 	if (req->status != 0) {
 		pr_err("acc_complete_set_string, err %d\n", req->status);
@@ -371,22 +372,26 @@
 	case ACCESSORY_STRING_SERIAL:
 		string_dest = dev->serial;
 		break;
-	}
-	if (string_dest) {
-		unsigned long flags;
-
-		if (length >= ACC_STRING_SIZE)
-			length = ACC_STRING_SIZE - 1;
-
-		spin_lock_irqsave(&dev->lock, flags);
-		memcpy(string_dest, req->buf, length);
-		/* ensure zero termination */
-		string_dest[length] = 0;
-		spin_unlock_irqrestore(&dev->lock, flags);
-	} else {
+	default:
 		pr_err("unknown accessory string index %d\n",
-			dev->string_index);
+					dev->string_index);
+		return;
 	}
+
+	if (!length) {
+		pr_debug("zero length for accessory string index %d\n",
+						dev->string_index);
+		return;
+	}
+
+	if (length >= ACC_STRING_SIZE)
+		length = ACC_STRING_SIZE - 1;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	memcpy(string_dest, req->buf, length);
+	/* ensure zero termination */
+	string_dest[length] = 0;
+	spin_unlock_irqrestore(&dev->lock, flags);
 }
 
 static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 675e50e..37d904f 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -79,6 +79,7 @@
 
 struct qusb_phy {
 	struct usb_phy		phy;
+	struct mutex		lock;
 	void __iomem		*base;
 	void __iomem		*efuse_reg;
 
@@ -103,11 +104,11 @@
 	int			efuse_bit_pos;
 	int			efuse_num_of_bits;
 
-	bool			power_enabled;
+	int			power_enabled_ref;
 	bool			clocks_enabled;
 	bool			cable_connected;
 	bool			suspended;
-	bool			rm_pulldown;
+	bool			dpdm_enable;
 
 	struct regulator_desc	dpdm_rdesc;
 	struct regulator_dev	*dpdm_rdev;
@@ -171,35 +172,47 @@
 	return ret;
 }
 
-static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on,
-						bool toggle_vdd)
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on)
 {
 	int ret = 0;
 
-	dev_dbg(qphy->phy.dev, "%s turn %s regulators. power_enabled:%d\n",
-			__func__, on ? "on" : "off", qphy->power_enabled);
+	mutex_lock(&qphy->lock);
 
-	if (toggle_vdd && qphy->power_enabled == on) {
-		dev_dbg(qphy->phy.dev, "PHYs' regulators are already ON.\n");
-		return 0;
+	dev_dbg(qphy->phy.dev,
+		"%s:req to turn %s regulators. power_enabled_ref:%d\n",
+			__func__, on ? "on" : "off", qphy->power_enabled_ref);
+
+	if (on && ++qphy->power_enabled_ref > 1) {
+		dev_dbg(qphy->phy.dev, "PHYs' regulators are already on\n");
+		goto done;
 	}
 
-	if (!on)
-		goto disable_vdda33;
-
-	if (toggle_vdd) {
-		ret = qusb_phy_config_vdd(qphy, true);
-		if (ret) {
-			dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
-								ret);
-			goto err_vdd;
+	if (!on) {
+		if (on == qphy->power_enabled_ref) {
+			dev_dbg(qphy->phy.dev,
+				"PHYs' regulators are already off\n");
+			goto done;
 		}
 
-		ret = regulator_enable(qphy->vdd);
-		if (ret) {
-			dev_err(qphy->phy.dev, "Unable to enable VDD\n");
-			goto unconfig_vdd;
-		}
+		qphy->power_enabled_ref--;
+		if (!qphy->power_enabled_ref)
+			goto disable_vdda33;
+
+		dev_dbg(qphy->phy.dev, "Skip turning off PHYs' regulators\n");
+		goto done;
+	}
+
+	ret = qusb_phy_config_vdd(qphy, true);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+							ret);
+		goto err_vdd;
+	}
+
+	ret = regulator_enable(qphy->vdd);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+		goto unconfig_vdd;
 	}
 
 	ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
@@ -242,10 +255,9 @@
 		goto unset_vdd33;
 	}
 
-	if (toggle_vdd)
-		qphy->power_enabled = true;
-
 	pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
+
+	mutex_unlock(&qphy->lock);
 	return ret;
 
 disable_vdda33:
@@ -281,22 +293,24 @@
 		dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
 
 disable_vdd:
-	if (toggle_vdd) {
-		ret = regulator_disable(qphy->vdd);
-		if (ret)
-			dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
-								ret);
+	ret = regulator_disable(qphy->vdd);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+							ret);
 
 unconfig_vdd:
-		ret = qusb_phy_config_vdd(qphy, false);
-		if (ret)
-			dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
-								ret);
-	}
+	ret = qusb_phy_config_vdd(qphy, false);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
+							ret);
 err_vdd:
-	if (toggle_vdd)
-		qphy->power_enabled = false;
 	dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
+
+	/* in case of error in turning on regulators */
+	if (qphy->power_enabled_ref)
+		qphy->power_enabled_ref--;
+done:
+	mutex_unlock(&qphy->lock);
 	return ret;
 }
 
@@ -394,7 +408,7 @@
 
 	dev_dbg(phy->dev, "%s\n", __func__);
 
-	ret = qusb_phy_enable_power(qphy, true, true);
+	ret = qusb_phy_enable_power(qphy, true);
 	if (ret)
 		return ret;
 
@@ -576,7 +590,7 @@
 				qphy->base + qphy->phy_reg[INTR_CTRL]);
 			qusb_phy_reset(qphy);
 			qusb_phy_enable_clocks(qphy, false);
-			qusb_phy_enable_power(qphy, false, true);
+			qusb_phy_enable_power(qphy, false);
 		}
 		qphy->suspended = true;
 	} else {
@@ -595,7 +609,6 @@
 			/* Makes sure that above write goes through */
 			wmb();
 		} else { /* Cable connect case */
-			qusb_phy_enable_power(qphy, true, true);
 			qusb_phy_enable_clocks(qphy, true);
 		}
 		qphy->suspended = false;
@@ -636,15 +649,17 @@
 	int ret = 0;
 	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
 
-	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+	dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+				__func__, qphy->dpdm_enable);
 
-	if (qphy->rm_pulldown) {
-		ret = qusb_phy_enable_power(qphy, true, false);
-		if (ret >= 0) {
-			qphy->rm_pulldown = true;
-			dev_dbg(qphy->phy.dev, "dpdm_enable:rm_pulldown:%d\n",
-							qphy->rm_pulldown);
+	if (!qphy->dpdm_enable) {
+		ret = qusb_phy_enable_power(qphy, true);
+		if (ret < 0) {
+			dev_dbg(qphy->phy.dev,
+				"dpdm regulator enable failed:%d\n", ret);
+			return ret;
 		}
+		qphy->dpdm_enable = true;
 	}
 
 	return ret;
@@ -655,15 +670,17 @@
 	int ret = 0;
 	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
 
-	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+	dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+				__func__, qphy->dpdm_enable);
 
-	if (!qphy->rm_pulldown) {
-		ret = qusb_phy_enable_power(qphy, false, false);
-		if (ret >= 0) {
-			qphy->rm_pulldown = false;
-			dev_dbg(qphy->phy.dev, "dpdm_disable:rm_pulldown:%d\n",
-							qphy->rm_pulldown);
+	if (qphy->dpdm_enable) {
+		ret = qusb_phy_enable_power(qphy, false);
+		if (ret < 0) {
+			dev_dbg(qphy->phy.dev,
+				"dpdm regulator disable failed:%d\n", ret);
+			return ret;
 		}
+		qphy->dpdm_enable = false;
 	}
 
 	return ret;
@@ -673,9 +690,9 @@
 {
 	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
 
-	dev_dbg(qphy->phy.dev, "%s qphy->rm_pulldown = %d\n", __func__,
-					qphy->rm_pulldown);
-	return qphy->rm_pulldown;
+	dev_dbg(qphy->phy.dev, "%s qphy->dpdm_enable = %d\n", __func__,
+					qphy->dpdm_enable);
+	return qphy->dpdm_enable;
 }
 
 static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
@@ -964,6 +981,7 @@
 		return PTR_ERR(qphy->vdda18);
 	}
 
+	mutex_init(&qphy->lock);
 	platform_set_drvdata(pdev, qphy);
 
 	qphy->phy.label			= "msm-qusb-phy-v2";
@@ -991,7 +1009,7 @@
 
 	usb_remove_phy(&qphy->phy);
 	qusb_phy_enable_clocks(qphy, false);
-	qusb_phy_enable_power(qphy, false, true);
+	qusb_phy_enable_power(qphy, false);
 
 	return 0;
 }
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 76b034e..e355e35 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -130,7 +130,7 @@
 	bool			cable_connected;
 	bool			suspended;
 	bool			ulpi_mode;
-	bool			rm_pulldown;
+	bool			dpdm_enable;
 	bool			is_se_clk;
 
 	struct regulator_desc	dpdm_rdesc;
@@ -673,15 +673,17 @@
 	int ret = 0;
 	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
 
-	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+	dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+				__func__, qphy->dpdm_enable);
 
-	if (qphy->rm_pulldown) {
+	if (!qphy->dpdm_enable) {
 		ret = qusb_phy_enable_power(qphy, true, false);
-		if (ret >= 0) {
-			qphy->rm_pulldown = true;
-			dev_dbg(qphy->phy.dev, "dpdm_enable:rm_pulldown:%d\n",
-							qphy->rm_pulldown);
+		if (ret < 0) {
+			dev_dbg(qphy->phy.dev,
+				"dpdm regulator enable failed:%d\n", ret);
+			return ret;
 		}
+		qphy->dpdm_enable = true;
 	}
 
 	return ret;
@@ -692,15 +694,17 @@
 	int ret = 0;
 	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
 
-	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+	dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+				__func__, qphy->dpdm_enable);
 
-	if (!qphy->rm_pulldown) {
+	if (qphy->dpdm_enable) {
 		ret = qusb_phy_enable_power(qphy, false, false);
-		if (ret >= 0) {
-			qphy->rm_pulldown = false;
-			dev_dbg(qphy->phy.dev, "dpdm_disable:rm_pulldown:%d\n",
-							qphy->rm_pulldown);
+		if (ret < 0) {
+			dev_dbg(qphy->phy.dev,
+				"dpdm regulator disable failed:%d\n", ret);
+			return ret;
 		}
+		qphy->dpdm_enable = false;
 	}
 
 	return ret;
@@ -710,9 +714,9 @@
 {
 	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
 
-	dev_dbg(qphy->phy.dev, "%s qphy->rm_pulldown = %d\n", __func__,
-					qphy->rm_pulldown);
-	return qphy->rm_pulldown;
+	dev_dbg(qphy->phy.dev, "%s qphy->dpdm_enable = %d\n", __func__,
+					qphy->dpdm_enable);
+	return qphy->dpdm_enable;
 }
 
 static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 5b74917..18f7612 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1632,7 +1632,7 @@
 	.release	= single_release,
 };
 
-#endif	/* CONFIG_SCHED_HMP */
+#endif	/* CONFIG_SCHED_WALT */
 
 #ifdef CONFIG_SCHED_AUTOGROUP
 /*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 40d04f9..7627c76 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1503,9 +1503,6 @@
 	u32 sum_history[RAVG_HIST_SIZE_MAX];
 	u32 *curr_window_cpu, *prev_window_cpu;
 	u32 curr_window, prev_window;
-#ifdef CONFIG_SCHED_HMP
-	u64 curr_burst, avg_burst, avg_sleep_time;
-#endif
 	u16 active_windows;
 	u32 pred_demand;
 	u8 busy_buckets[NUM_BUSY_BUCKETS];
@@ -2653,38 +2650,10 @@
 
 #define MAX_NUM_CGROUP_COLOC_ID	20
 
-#ifdef CONFIG_SCHED_HMP
-extern int sched_set_window(u64 window_start, unsigned int window_size);
-extern unsigned long sched_get_busy(int cpu);
-extern void sched_get_cpus_busy(struct sched_load *busy,
-				const struct cpumask *query_cpus);
-extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
-extern u32 sched_get_init_task_load(struct task_struct *p);
-extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
-extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
-extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
-extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
-extern int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle);
-extern unsigned int sched_get_cluster_wake_idle(int cpu);
-extern int sched_update_freq_max_load(const cpumask_t *cpumask);
-extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
-							u32 fmin, u32 fmax);
-extern void sched_set_cpu_cstate(int cpu, int cstate,
-			 int wakeup_energy, int wakeup_latency);
-extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
-				int wakeup_energy, int wakeup_latency);
-extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
-extern unsigned int sched_get_group_id(struct task_struct *p);
-
-#else /* CONFIG_SCHED_HMP */
 static inline int sched_set_window(u64 window_start, unsigned int window_size)
 {
 	return -EINVAL;
 }
-static inline unsigned long sched_get_busy(int cpu)
-{
-	return 0;
-}
 static inline void sched_get_cpus_busy(struct sched_load *busy,
 				       const struct cpumask *query_cpus) {};
 
@@ -2698,12 +2667,6 @@
 {
 }
 
-static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
-			int dstate, int wakeup_energy, int wakeup_latency)
-{
-}
-#endif /* CONFIG_SCHED_HMP */
-
 #ifdef CONFIG_SCHED_WALT
 extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
 extern void sched_set_io_is_busy(int val);
@@ -2731,10 +2694,8 @@
 #endif /* CONFIG_SCHED_WALT */
 
 #ifndef CONFIG_SCHED_WALT
-#ifndef CONFIG_SCHED_HMP
 static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
 					u32 fmin, u32 fmax) { }
-#endif /* CONFIG_SCHED_HMP */
 #endif /* CONFIG_SCHED_WALT */
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -2847,7 +2808,7 @@
 task_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
-#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_HMP)
+#ifdef CONFIG_SMP
 extern void sched_exec(void);
 #else
 #define sched_exec()   {}
@@ -2982,7 +2943,6 @@
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
-extern int wake_up_process_no_notif(struct task_struct *tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 #ifdef CONFIG_SMP
  extern void kick_process(struct task_struct *tsk);
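
The sched.h hunk above relies on the usual stub convention: with the HMP-only branch gone, each remaining helper is either a real prototype or an unconditional static-inline no-op, so call sites need no #ifdefs. A minimal sketch of that convention with hypothetical names:

#include <linux/errno.h>
#include <linux/types.h>

#ifdef CONFIG_EXAMPLE_FEATURE
extern int example_set_window(u64 window_start, unsigned int window_size);
#else
/* feature compiled out: callers still link, the call just reports -EINVAL */
static inline int example_set_window(u64 window_start, unsigned int window_size)
{
	return -EINVAL;
}
#endif
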
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index f0ba8e6..322bc23 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -37,47 +37,13 @@
 extern unsigned int sysctl_sched_boost;
 extern unsigned int sysctl_sched_group_upmigrate_pct;
 extern unsigned int sysctl_sched_group_downmigrate_pct;
-#endif
-
-#ifdef CONFIG_SCHED_HMP
-
-enum freq_reporting_policy {
-	FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK,
-	FREQ_REPORT_CPU_LOAD,
-	FREQ_REPORT_TOP_TASK,
-	FREQ_REPORT_INVALID_POLICY
-};
-
-extern int sysctl_sched_freq_inc_notify;
-extern int sysctl_sched_freq_dec_notify;
-extern unsigned int sysctl_sched_freq_reporting_policy;
-extern unsigned int sysctl_sched_window_stats_policy;
-extern unsigned int sysctl_sched_ravg_hist_size;
-extern unsigned int sysctl_sched_spill_nr_run;
-extern unsigned int sysctl_sched_spill_load_pct;
-extern unsigned int sysctl_sched_upmigrate_pct;
-extern unsigned int sysctl_sched_downmigrate_pct;
-extern unsigned int sysctl_early_detection_duration;
-extern unsigned int sysctl_sched_small_wakee_task_load_pct;
-extern unsigned int sysctl_sched_big_waker_task_load_pct;
-extern unsigned int sysctl_sched_select_prev_cpu_us;
-extern unsigned int sysctl_sched_restrict_cluster_spill;
-extern unsigned int sysctl_sched_pred_alert_freq;
-extern unsigned int sysctl_sched_freq_aggregate;
-extern unsigned int sysctl_sched_enable_thread_grouping;
-extern unsigned int sysctl_sched_freq_aggregate_threshold_pct;
-extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker;
-extern unsigned int sysctl_sched_short_burst;
-extern unsigned int sysctl_sched_short_sleep;
-
-#elif defined(CONFIG_SCHED_WALT)
 
 extern int
 walt_proc_update_handler(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp,
 			 loff_t *ppos);
 
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
 
 enum sched_tunable_scaling {
 	SCHED_TUNABLESCALING_NONE,
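
walt_proc_update_handler keeps the standard proc-handler signature, so WALT tunables can be registered through an ordinary ctl_table entry. The sketch below only shows the general shape, using a generic handler and hypothetical names; the actual WALT entries presumably point .proc_handler at walt_proc_update_handler instead:

#include <linux/sysctl.h>

static int example_walt_tunable_pct = 100;
static int zero;
static int one_hundred = 100;

static struct ctl_table example_sched_table[] = {
	{
		.procname	= "sched_example_tunable_pct",
		.data		= &example_walt_tunable_pct,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,	/* clamp writes to [0, 100] */
		.extra1		= &zero,
		.extra2		= &one_hundred,
	},
	{ }	/* sentinel */
};
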
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e94a82b..420c6b5 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -71,7 +71,7 @@
 		__field(unsigned long,	cpu_load		)
 		__field(unsigned int,	rt_nr_running		)
 		__field(unsigned int,	cpus_allowed		)
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 		__field(unsigned int,	demand			)
 		__field(unsigned int,	pred_demand		)
 #endif
@@ -87,22 +87,22 @@
 		__entry->cpu_load	= task_rq(p)->cpu_load[0];
 		__entry->rt_nr_running	= task_rq(p)->rt.rt_nr_running;
 		__entry->cpus_allowed	= cpus_allowed;
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 		__entry->demand		= p->ravg.demand;
 		__entry->pred_demand	= p->ravg.pred_demand;
 #endif
 	),
 
 	TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x"
-#ifdef CONFIG_SCHED_HMP
-		 " demand=%u pred_demand=%u"
+#ifdef CONFIG_SCHED_WALT
+			" demand=%u pred_demand=%u"
 #endif
 			, __entry->cpu,
 			__entry->enqueue ? "enqueue" : "dequeue",
 			__entry->comm, __entry->pid,
 			__entry->prio, __entry->nr_running,
 			__entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 			, __entry->demand, __entry->pred_demand
 #endif
 			)
@@ -549,9 +549,9 @@
 #ifdef CONFIG_SCHED_WALT
 DECLARE_EVENT_CLASS(sched_cpu_load,
 
-	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
 
-	TP_ARGS(rq, idle, irqload, power_cost, temp),
+	TP_ARGS(rq, idle, irqload, power_cost),
 
 	TP_STRUCT__entry(
 		__field(unsigned int, cpu			)
@@ -566,7 +566,6 @@
 		__field(unsigned int, power_cost		)
 		__field(	 int, cstate			)
 		__field(	 int, dstate			)
-		__field(	 int, temp			)
 	),
 
 	TP_fast_assign(
@@ -582,181 +581,22 @@
 		__entry->power_cost		= power_cost;
 		__entry->cstate			= rq->cstate;
 		__entry->dstate			= rq->cluster->dstate;
-		__entry->temp			= temp;
 	),
 
-	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d temp %d",
+	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d",
 	__entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
 	__entry->load_scale_factor, __entry->capacity,
 	__entry->cumulative_runnable_avg, __entry->irqload,
 	__entry->max_freq, __entry->power_cost, __entry->cstate,
-	__entry->dstate, __entry->temp)
+	__entry->dstate)
 );
 
 DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
-	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
-	TP_ARGS(rq, idle, irqload, power_cost, temp)
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
+	TP_ARGS(rq, idle, irqload, power_cost)
 );
 #endif
 
-#ifdef CONFIG_SCHED_HMP
-
-TRACE_EVENT(sched_task_load,
-
-	TP_PROTO(struct task_struct *p, bool boost, int reason,
-		 bool sync, bool need_idle, u32 flags, int best_cpu),
-
-	TP_ARGS(p, boost, reason, sync, need_idle, flags, best_cpu),
-
-	TP_STRUCT__entry(
-		__array(	char,	comm,	TASK_COMM_LEN	)
-		__field(	pid_t,	pid			)
-		__field(unsigned int,	demand			)
-		__field(	bool,	boost			)
-		__field(	int,	reason			)
-		__field(	bool,	sync			)
-		__field(	bool,	need_idle		)
-		__field(	u32,	flags			)
-		__field(	int,	best_cpu		)
-		__field(	u64,	latency			)
-		__field(	int,	grp_id			)
-		__field(	u64,	avg_burst		)
-		__field(	u64,	avg_sleep		)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-		__entry->pid		= p->pid;
-		__entry->demand		= p->ravg.demand;
-		__entry->boost		= boost;
-		__entry->reason		= reason;
-		__entry->sync		= sync;
-		__entry->need_idle	= need_idle;
-		__entry->flags		= flags;
-		__entry->best_cpu	= best_cpu;
-		__entry->latency	= p->state == TASK_WAKING ?
-						      sched_ktime_clock() -
-						      p->ravg.mark_start : 0;
-		__entry->grp_id		= p->grp ? p->grp->id : 0;
-		__entry->avg_burst	= p->ravg.avg_burst;
-		__entry->avg_sleep	= p->ravg.avg_sleep_time;
-	),
-
-	TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu avg_burst=%llu avg_sleep=%llu",
-		__entry->pid, __entry->comm, __entry->demand,
-		__entry->boost, __entry->reason, __entry->sync,
-		__entry->need_idle, __entry->flags, __entry->grp_id,
-		__entry->best_cpu, __entry->latency, __entry->avg_burst,
-		__entry->avg_sleep)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup,
-	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
-	TP_ARGS(rq, idle, irqload, power_cost, temp)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
-	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
-	TP_ARGS(rq, idle, irqload, power_cost, temp)
-);
-
-TRACE_EVENT(sched_reset_all_window_stats,
-
-	TP_PROTO(u64 window_start, u64 window_size, u64 time_taken,
-		int reason, unsigned int old_val, unsigned int new_val),
-
-	TP_ARGS(window_start, window_size, time_taken,
-		reason, old_val, new_val),
-
-	TP_STRUCT__entry(
-		__field(	u64,	window_start		)
-		__field(	u64,	window_size		)
-		__field(	u64,	time_taken		)
-		__field(	int,	reason			)
-		__field(unsigned int,	old_val			)
-		__field(unsigned int,	new_val			)
-	),
-
-	TP_fast_assign(
-		__entry->window_start = window_start;
-		__entry->window_size = window_size;
-		__entry->time_taken = time_taken;
-		__entry->reason	= reason;
-		__entry->old_val = old_val;
-		__entry->new_val = new_val;
-	),
-
-	TP_printk("time_taken %llu window_start %llu window_size %llu reason %s old_val %u new_val %u",
-		  __entry->time_taken, __entry->window_start,
-		  __entry->window_size,
-		  sched_window_reset_reasons[__entry->reason],
-		  __entry->old_val, __entry->new_val)
-);
-
-TRACE_EVENT(sched_get_busy,
-
-	TP_PROTO(int cpu, u64 load, u64 nload, u64 pload, int early),
-
-	TP_ARGS(cpu, load, nload, pload, early),
-
-	TP_STRUCT__entry(
-		__field(	int,	cpu			)
-		__field(	u64,	load			)
-		__field(	u64,	nload			)
-		__field(	u64,	pload			)
-		__field(	int,	early			)
-	),
-
-	TP_fast_assign(
-		__entry->cpu		= cpu;
-		__entry->load		= load;
-		__entry->nload		= nload;
-		__entry->pload		= pload;
-		__entry->early		= early;
-	),
-
-	TP_printk("cpu %d load %lld new_task_load %lld predicted_load %lld early %d",
-		__entry->cpu, __entry->load, __entry->nload,
-		__entry->pload, __entry->early)
-);
-
-TRACE_EVENT(sched_freq_alert,
-
-	TP_PROTO(int cpu, int pd_notif, int check_groups, struct rq *rq,
-		u64 new_load),
-
-	TP_ARGS(cpu, pd_notif, check_groups, rq, new_load),
-
-	TP_STRUCT__entry(
-		__field(	int,	cpu			)
-		__field(	int,	pd_notif		)
-		__field(	int,	check_groups		)
-		__field(	u64,	old_busy_time		)
-		__field(	u64,	ps			)
-		__field(	u64,	new_load		)
-		__field(	u64,	old_pred		)
-		__field(	u64,	new_pred		)
-	),
-
-	TP_fast_assign(
-		__entry->cpu		= cpu;
-		__entry->pd_notif	= pd_notif;
-		__entry->check_groups	= check_groups;
-		__entry->old_busy_time	= rq->old_busy_time;
-		__entry->ps		= rq->prev_runnable_sum;
-		__entry->new_load	= new_load;
-		__entry->old_pred	= rq->old_estimated_time;
-		__entry->new_pred	= rq->hmp_stats.pred_demands_sum;
-	),
-
-	TP_printk("cpu %d pd_notif=%d check_groups %d old_busy_time=%llu prev_sum=%lld new_load=%llu old_pred=%llu new_pred=%llu",
-		__entry->cpu, __entry->pd_notif, __entry->check_groups,
-		__entry->old_busy_time, __entry->ps, __entry->new_load,
-		__entry->old_pred, __entry->new_pred)
-);
-
-#endif	/* CONFIG_SCHED_HMP */
-
 #ifdef CONFIG_SMP
 TRACE_EVENT(sched_cpu_util,
 
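
After dropping the temp argument, sched_cpu_load is a plain event class, and every DEFINE_EVENT instance must repeat the reduced prototype. The hypothetical class below illustrates that split only; the usual TRACE_SYSTEM/TRACE_INCLUDE boilerplate of a trace header is omitted:

DECLARE_EVENT_CLASS(example_cpu_load,

	TP_PROTO(int cpu, int idle, u64 irqload, unsigned int power_cost),

	TP_ARGS(cpu, idle, irqload, power_cost),

	TP_STRUCT__entry(
		__field(int,		cpu)
		__field(int,		idle)
		__field(u64,		irqload)
		__field(unsigned int,	power_cost)
	),

	TP_fast_assign(
		__entry->cpu		= cpu;
		__entry->idle		= idle;
		__entry->irqload	= irqload;
		__entry->power_cost	= power_cost;
	),

	TP_printk("cpu=%d idle=%d irqload=%llu power_cost=%u",
		  __entry->cpu, __entry->idle, __entry->irqload,
		  __entry->power_cost)
);

/* Each instance of the class repeats the class prototype verbatim. */
DEFINE_EVENT(example_cpu_load, example_cpu_load_lb,
	TP_PROTO(int cpu, int idle, u64 irqload, unsigned int power_cost),
	TP_ARGS(cpu, idle, irqload, power_cost)
);
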
diff --git a/init/Kconfig b/init/Kconfig
index 954de19..af000c7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1276,16 +1276,6 @@
 
 endif # CGROUPS
 
-config SCHED_HMP
-	bool "Scheduler support for heterogenous multi-processor systems"
-	select SCHED_WALT
-	depends on SMP && FAIR_GROUP_SCHED
-	help
-	  This feature will let the scheduler optimize task placement on
-	  systems made of heterogeneous cpus i.e cpus that differ either
-	  in their instructions per-cycle capability or the maximum
-	  frequency they can attain.
-
 config SCHED_WALT
 	bool "WALT"
 	depends on SMP && FAIR_GROUP_SCHED
@@ -1293,14 +1283,6 @@
 	  Use Window-Assisted Load Tracking (WALT) as an alternative or
 	  additional load tracking scheme in lieu of or along with PELT.
 
-config SCHED_HMP_CSTATE_AWARE
-	bool "CPU C-state aware scheduler"
-	depends on SCHED_HMP
-	help
-	  This feature will let the HMP scheduler optimize task placement
-	  with CPUs C-state. If this is enabled, scheduler places tasks
-	  onto the shallowest C-state CPU among the most power efficient CPUs.
-
 config SCHED_CORE_CTL
 	bool "QTI Core Control"
 	depends on SMP
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 3d12ce8..f6cce95 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -18,7 +18,6 @@
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
 obj-y += wait.o swait.o completion.o idle.o
-obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o sched_avg.o
 obj-$(CONFIG_SCHED_WALT) += walt.o boost.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c2433b3..4b59880 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1074,7 +1074,6 @@
 	struct migration_arg *arg = data;
 	struct task_struct *p = arg->task;
 	struct rq *rq = this_rq();
-	int src_cpu = cpu_of(rq);
 	bool moved = false;
 
 	/*
@@ -1109,9 +1108,6 @@
 
 	local_irq_enable();
 
-	if (moved)
-		notify_migration(src_cpu, arg->dest_cpu, false, p);
-
 	return 0;
 }
 
@@ -1287,8 +1283,6 @@
 #endif
 #endif
 
-	trace_sched_migrate_task(p, new_cpu, pct_task_load(p));
-
 	if (task_cpu(p) != new_cpu) {
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p);
@@ -2091,12 +2085,9 @@
 	struct related_thread_group *grp = NULL;
 	int src_cpu;
 	bool notif_required = false;
-	bool freq_notif_allowed = !(wake_flags & WF_NO_NOTIFIER);
 	bool check_group = false;
 #endif
 
-	wake_flags &= ~WF_NO_NOTIFIER;
-
 	/*
 	 * If we are going to wake up a thread waiting for CONDITION we
 	 * need to ensure that CONDITION=1 done by the caller can not be
@@ -2208,19 +2199,6 @@
 out:
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
-#ifdef CONFIG_SMP
-	if (freq_notif_allowed) {
-		if (notif_required && !same_freq_domain(src_cpu, cpu)) {
-			check_for_freq_change(cpu_rq(cpu),
-						false, check_group);
-			check_for_freq_change(cpu_rq(src_cpu),
-						false, check_group);
-		} else if (success) {
-			check_for_freq_change(cpu_rq(cpu), true, false);
-		}
-	}
-#endif
-
 	return success;
 }
 
@@ -2302,26 +2280,6 @@
 }
 EXPORT_SYMBOL(wake_up_process);
 
-/**
- * wake_up_process_no_notif - Wake up a specific process without notifying
- * governor
- * @p: The process to be woken up.
- *
- * Attempt to wake up the nominated process and move it to the set of runnable
- * processes.
- *
- * Return: 1 if the process was woken up, 0 if it was already running.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-int wake_up_process_no_notif(struct task_struct *p)
-{
-	WARN_ON(task_is_stopped_or_traced(p));
-	return try_to_wake_up(p, TASK_NORMAL, WF_NO_NOTIFIER);
-}
-EXPORT_SYMBOL(wake_up_process_no_notif);
-
 int wake_up_state(struct task_struct *p, unsigned int state)
 {
 	return try_to_wake_up(p, state, 0);
@@ -3165,7 +3123,7 @@
 	*load = rq->load.weight;
 }
 
-#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_HMP)
+#ifdef CONFIG_SMP
 
 /*
  * sched_exec - execve() is a valuable balancing opportunity, because at
@@ -3692,15 +3650,10 @@
 		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
 		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
 		cpufreq_update_util(rq, 0);
-		if (!is_idle_task(prev) && !prev->on_rq)
-			update_avg_burst(prev);
-
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
 
-		set_task_last_switch_out(prev, wallclock);
-
 		trace_sched_switch(preempt, prev, next);
 		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
 	} else {
@@ -5953,7 +5906,6 @@
 		if (rq != dead_rq) {
 			raw_spin_unlock(&rq->lock);
 			raw_spin_unlock(&next->pi_lock);
-			notify_migration(dead_rq->cpu, dest_cpu, true, next);
 			rq = dead_rq;
 			raw_spin_lock(&next->pi_lock);
 			raw_spin_lock(&rq->lock);
@@ -8236,9 +8188,6 @@
 	for (i = 0; i < WAIT_TABLE_SIZE; i++)
 		init_waitqueue_head(bit_wait_table + i);
 
-#ifdef CONFIG_SCHED_HMP
-	pr_info("HMP scheduling enabled.\n");
-#endif
 	sched_boost_parse_dt();
 	init_clusters();
 
@@ -8427,8 +8376,6 @@
 	i = alloc_related_thread_groups();
 	BUG_ON(i);
 
-	set_hmp_defaults();
-
 	set_load_weight(&init_task);
 
 	/*
@@ -9552,13 +9499,6 @@
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 static struct cftype cpu_files[] = {
-#ifdef CONFIG_SCHED_HMP
-	{
-		.name = "upmigrate_discourage",
-		.read_u64 = cpu_upmigrate_discourage_read_u64,
-		.write_u64 = cpu_upmigrate_discourage_write_u64,
-	},
-#endif
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	{
 		.name = "shares",
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index b140e55..4c3bf526 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -612,8 +612,7 @@
 	spin_lock_irqsave(&cluster->pending_lock, flags);
 	cluster->pending = true;
 	spin_unlock_irqrestore(&cluster->pending_lock, flags);
-
-	wake_up_process_no_notif(cluster->core_ctl_thread);
+	wake_up_process(cluster->core_ctl_thread);
 }
 
 static u64 core_ctl_check_timestamp;
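
With wake_up_process_no_notif() removed, core_ctl falls back to the stock kthread wake-up pattern: publish the request under the pending lock, then call wake_up_process(). A self-contained sketch of that pattern with hypothetical names:

#include <linux/sched.h>
#include <linux/spinlock.h>

struct example_cluster {
	spinlock_t pending_lock;
	bool pending;
	struct task_struct *helper_thread;
};

static void example_wake_up_helper(struct example_cluster *cluster)
{
	unsigned long flags;

	/* make the request visible before the thread runs */
	spin_lock_irqsave(&cluster->pending_lock, flags);
	cluster->pending = true;
	spin_unlock_irqrestore(&cluster->pending_lock, flags);

	/* no governor-notification variant is needed any more */
	wake_up_process(cluster->helper_thread);
}
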
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index ed9f6db..b520691 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -698,10 +698,6 @@
 	P(cpu_capacity);
 #endif
 #ifdef CONFIG_SCHED_WALT
-#ifdef CONFIG_SCHED_HMP
-	P(static_cpu_pwr_cost);
-	P(cluster->static_cluster_pwr_cost);
-#endif
 	P(cluster->load_scale_factor);
 	P(cluster->capacity);
 	P(cluster->max_possible_capacity);
@@ -794,10 +790,6 @@
 	P(sysctl_sched_child_runs_first);
 	P(sysctl_sched_features);
 #ifdef CONFIG_SCHED_WALT
-#ifdef CONFIG_SCHED_HMP
-	P(sched_upmigrate);
-	P(sched_downmigrate);
-#endif
 	P(sched_init_task_load_windows);
 	P(min_capacity);
 	P(max_capacity);
@@ -965,9 +957,6 @@
 void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 {
 	unsigned long nr_switches;
-	unsigned int load_avg;
-
-	load_avg = pct_task_load(p);
 
 	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
 						get_nr_threads(p));
@@ -1025,12 +1014,9 @@
 		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
 		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
 		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
-		__P(load_avg);
-#ifdef CONFIG_SCHED_HMP
+#ifdef CONFIG_SCHED_WALT
 		P(ravg.demand);
 #endif
-#endif
 		avg_atom = p->se.sum_exec_runtime;
 		if (nr_switches)
 			avg_atom = div64_ul(avg_atom, nr_switches);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1c5bb37..db8d37f 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -48,52 +48,6 @@
 				       u32 new_task_load, u32 new_pred_demand);
 #endif
 
-#ifdef CONFIG_SCHED_HMP
-#ifdef CONFIG_CFS_BANDWIDTH
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-				 struct task_struct *p, int change_cra);
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-				 struct task_struct *p, int change_cra);
-
-static inline void dec_throttled_cfs_rq_hmp_stats(
-				struct hmp_sched_stats *stats,
-				struct cfs_rq *cfs_rq);
-
-static inline void inc_throttled_cfs_rq_hmp_stats(
-				struct hmp_sched_stats *stats,
-				struct cfs_rq *cfs_rq);
-
-static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq);
-
-#else /* CONFIG_CFS_BANDWIDTH */
-static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-				 struct task_struct *p, int change_cra) { }
-static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-				 struct task_struct *p, int change_cra) { }
-#endif /* CONFIG_CFS_BANDWIDTH */
-
-#ifdef CONFIG_SMP
-
-static struct rq *find_busiest_queue_hmp(struct lb_env *env,
-					struct sched_group *group);
-static int
-bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds);
-
-static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
-						  struct sd_lb_stats *sds,
-						  struct sched_group *sg,
-						  struct sg_lb_stats *sgs);
-
-static int select_best_cpu(struct task_struct *p, int target, int reason,
-			   int sync);
-
-#ifdef CONFIG_NO_HZ_COMMON
-static int find_new_hmp_ilb(int type);
-static int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type);
-#endif /* CONFIG_NO_HZ_COMMON */
-#endif /* CONFIG_SMP */
-#else /* CONFIG_SCHED_HMP */
-
 static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
 				 struct task_struct *p, int change_cra) { }
 static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
@@ -123,8 +77,6 @@
 }
 #endif /* CONFIG_SMP */
 
-#endif /* CONFIG_SCHED_HMP */
-
 #ifdef CONFIG_SCHED_WALT
 static inline bool task_fits_max(struct task_struct *p, int cpu);
 #endif
@@ -4129,8 +4081,7 @@
 	/* Log effect on hmp stats after throttling */
 	trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
 				sched_irqload(cpu_of(rq)),
-				power_cost(cpu_of(rq), 0),
-				cpu_temp(cpu_of(rq)));
+				power_cost(cpu_of(rq), 0));
 }
 
 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
@@ -4186,8 +4137,7 @@
 	/* Log effect on hmp stats after un-throttling */
 	trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
 				sched_irqload(cpu_of(rq)),
-				power_cost(cpu_of(rq), 0),
-				cpu_temp(cpu_of(rq)));
+				power_cost(cpu_of(rq), 0));
 }
 
 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -7075,10 +7025,6 @@
 	int want_affine = 0;
 	int sync = wake_flags & WF_SYNC;
 
-#ifdef CONFIG_SCHED_HMP
-	return select_best_cpu(p, prev_cpu, 0, sync);
-#endif
-
 	if (energy_aware()) {
 		rcu_read_lock();
 		new_cpu = energy_aware_wake_cpu(p, prev_cpu, sync);
@@ -7751,9 +7697,6 @@
 	enum fbq_type		fbq_type;
 	enum group_type		busiest_group_type;
 	struct list_head	tasks;
-#ifdef CONFIG_SCHED_HMP
-	enum sched_boost_policy	boost_policy;
-#endif
 };
 
 /*
@@ -7851,9 +7794,6 @@
 int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
 	int tsk_cache_hot;
-#ifdef CONFIG_SCHED_HMP
-	int twf, group_cpus;
-#endif
 
 	lockdep_assert_held(&env->src_rq->lock);
 
@@ -7924,37 +7864,6 @@
 		return 0;
 #endif
 
-#ifdef CONFIG_SCHED_HMP
-	if (cpu_capacity(env->dst_cpu) > cpu_capacity(env->src_cpu)) {
-		if (nr_big_tasks(env->src_rq) && !is_big_task(p))
-			return 0;
-
-		if (env->boost_policy == SCHED_BOOST_ON_BIG &&
-					!task_sched_boost(p))
-			return 0;
-	}
-
-	twf = task_will_fit(p, env->dst_cpu);
-
-	/*
-	 * Attempt to not pull tasks that don't fit. We may get lucky and find
-	 * one that actually fits.
-	 */
-	if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
-		return 0;
-
-	/*
-	 * Group imbalance can sometimes cause work to be pulled across groups
-	 * even though the group could have managed the imbalance on its own.
-	 * Prevent inter-cluster migrations for big tasks when the number of
-	 * tasks is lower than the capacity of the group.
-	 */
-	group_cpus = DIV_ROUND_UP(env->busiest_grp_capacity,
-						 SCHED_CAPACITY_SCALE);
-	if (!twf && env->busiest_nr_running <= group_cpus)
-		return 0;
-#endif
-
 	if (task_running(env->src_rq, p)) {
 		schedstat_inc(p->se.statistics.nr_failed_migrations_running);
 		return 0;
@@ -8295,10 +8204,6 @@
 	unsigned long group_capacity;
 	unsigned long group_util; /* Total utilization of the group */
 	unsigned int sum_nr_running; /* Nr tasks running in the group */
-#ifdef CONFIG_SCHED_HMP
-	unsigned long sum_nr_big_tasks;
-	u64 group_cpu_load; /* Scaled load of all CPUs of the group */
-#endif
 	unsigned int idle_cpus;
 	unsigned int group_weight;
 	enum group_type group_type;
@@ -8342,10 +8247,6 @@
 			.avg_load = 0UL,
 			.sum_nr_running = 0,
 			.group_type = group_other,
-#ifdef CONFIG_SCHED_HMP
-			.sum_nr_big_tasks = 0UL,
-			.group_cpu_load = 0ULL,
-#endif
 		},
 	};
 }
@@ -8676,8 +8577,7 @@
 
 		trace_sched_cpu_load_lb(cpu_rq(i), idle_cpu(i),
 				     sched_irqload(i),
-				     power_cost(i, 0),
-				     cpu_temp(i));
+				     power_cost(i, 0));
 
 		if (cpu_isolated(i))
 			continue;
@@ -8696,11 +8596,6 @@
 		if (nr_running > 1)
 			*overload = true;
 
-#ifdef CONFIG_SCHED_HMP
-		sgs->sum_nr_big_tasks += rq->hmp_stats.nr_big_tasks;
-		sgs->group_cpu_load += cpu_load(i);
-#endif
-
 #ifdef CONFIG_NUMA_BALANCING
 		sgs->nr_numa_running += rq->nr_numa_running;
 		sgs->nr_preferred_running += rq->nr_preferred_running;
@@ -9309,10 +9204,6 @@
 	unsigned long busiest_load = 0, busiest_capacity = 1;
 	int i;
 
-#ifdef CONFIG_SCHED_HMP
-	return find_busiest_queue_hmp(env, group);
-#endif
-
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		unsigned long capacity, wl;
 		enum fbq_type rt;
@@ -9518,9 +9409,6 @@
 		.loop			= 0,
 		.busiest_nr_running     = 0,
 		.busiest_grp_capacity   = 0,
-#ifdef CONFIG_SCHED_HMP
-		.boost_policy		= sched_boost_policy(),
-#endif
 	};
 
 	/*
@@ -9727,21 +9615,9 @@
 			sd->nr_balance_failed = sd->cache_nice_tries +
 					NEED_ACTIVE_BALANCE_THRESHOLD - 1;
 		}
-	} else {
+	} else
 		sd->nr_balance_failed = 0;
 
-		/* Assumes one 'busiest' cpu that we pulled tasks from */
-		if (!same_freq_domain(this_cpu, cpu_of(busiest))) {
-			int check_groups = !!(env.flags &
-					 LBF_MOVED_RELATED_THREAD_GROUP_TASK);
-
-			check_for_freq_change(this_rq, false, check_groups);
-			check_for_freq_change(busiest, false, check_groups);
-		} else {
-			check_for_freq_change(this_rq, true, false);
-		}
-	}
-
 	if (likely(!active_balance)) {
 		/* We were unbalanced, so reset the balancing interval */
 		sd->balance_interval = sd->min_interval;
@@ -9976,9 +9852,6 @@
 		.busiest_grp_capacity	= 0,
 		.flags			= 0,
 		.loop			= 0,
-#ifdef CONFIG_SCHED_HMP
-		.boost_policy		= sched_boost_policy(),
-#endif
 	};
 	bool moved = false;
 
@@ -10063,15 +9936,6 @@
 
 	local_irq_enable();
 
-	if (moved && !same_freq_domain(busiest_cpu, target_cpu)) {
-		int check_groups = !!(env.flags &
-					 LBF_MOVED_RELATED_THREAD_GROUP_TASK);
-		check_for_freq_change(busiest_rq, false, check_groups);
-		check_for_freq_change(target_rq, false, check_groups);
-	} else if (moved) {
-		check_for_freq_change(target_rq, true, false);
-	}
-
 	return 0;
 }
 
@@ -10101,10 +9965,6 @@
 	struct rq *rq = cpu_rq(cpu);
 	cpumask_t cpumask;
 
-#ifdef CONFIG_SCHED_HMP
-	return find_new_hmp_ilb(type);
-#endif
-
 	rcu_read_lock();
 	sd = rcu_dereference_check_sched_domain(rq->sd);
 	if (sd) {
@@ -10440,11 +10300,9 @@
 static inline bool nohz_kick_needed(struct rq *rq, int *type)
 {
 	unsigned long now = jiffies;
-#ifndef CONFIG_SCHED_HMP
 	struct sched_domain_shared *sds;
 	struct sched_domain *sd;
 	int nr_busy;
-#endif
 	int cpu = rq->cpu;
 	bool kick = false;
 
@@ -10465,17 +10323,12 @@
 	if (likely(!atomic_read(&nohz.nr_cpus)))
 		return false;
 
-#ifdef CONFIG_SCHED_HMP
-	return _nohz_kick_needed_hmp(rq, cpu, type);
-#endif
-
 	if (time_before(now, nohz.next_balance))
 		return false;
 
 	if (energy_aware())
 		return rq->nr_running >= 2 && cpu_overutilized(cpu);
 
-#ifndef CONFIG_SCHED_HMP
 	rcu_read_lock();
 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
 	if (sds && !energy_aware()) {
@@ -10509,7 +10362,6 @@
 
 unlock:
 	rcu_read_unlock();
-#endif
 	return kick;
 }
 #else
@@ -11198,7 +11050,6 @@
 		fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
 					      task_load_delta,
 					      pred_demand_delta);
-		fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 	}
@@ -11208,7 +11059,6 @@
 		fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
 					      task_load_delta,
 					      pred_demand_delta);
-		fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
 	}
 }
 
@@ -11225,7 +11075,6 @@
 
 	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
 				      pred_demand_delta);
-	fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
 }
 
 static inline int task_will_be_throttled(struct task_struct *p)
@@ -11268,1025 +11117,7 @@
 
 #endif
 
-/* QHMP/Zone sched implementation begins here */
-
-#ifdef CONFIG_SCHED_HMP
-#ifdef CONFIG_SMP
-
-/* CPU selection flag */
-#define SBC_FLAG_PREV_CPU				0x1
-#define SBC_FLAG_BEST_CAP_CPU				0x2
-#define SBC_FLAG_CPU_COST				0x4
-#define SBC_FLAG_MIN_COST				0x8
-#define SBC_FLAG_IDLE_LEAST_LOADED			0x10
-#define SBC_FLAG_IDLE_CSTATE				0x20
-#define SBC_FLAG_COST_CSTATE_TIE_BREAKER		0x40
-#define SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER	0x80
-#define SBC_FLAG_CSTATE_LOAD				0x100
-#define SBC_FLAG_BEST_SIBLING				0x200
-#define SBC_FLAG_WAKER_CPU				0x400
-#define SBC_FLAG_PACK_TASK				0x800
-
-/* Cluster selection flag */
-#define SBC_FLAG_COLOC_CLUSTER				0x10000
-#define SBC_FLAG_WAKER_CLUSTER				0x20000
-#define SBC_FLAG_BACKUP_CLUSTER				0x40000
-#define SBC_FLAG_BOOST_CLUSTER				0x80000
-
-struct cpu_select_env {
-	struct task_struct *p;
-	struct related_thread_group *rtg;
-	u8 reason;
-	u8 need_idle:1;
-	u8 need_waker_cluster:1;
-	u8 sync:1;
-	enum sched_boost_policy boost_policy;
-	u8 pack_task:1;
-	int prev_cpu;
-	DECLARE_BITMAP(candidate_list, NR_CPUS);
-	DECLARE_BITMAP(backup_list, NR_CPUS);
-	u64 task_load;
-	u64 cpu_load;
-	u32 sbc_best_flag;
-	u32 sbc_best_cluster_flag;
-	struct cpumask search_cpus;
-};
-
-struct cluster_cpu_stats {
-	int best_idle_cpu, least_loaded_cpu;
-	int best_capacity_cpu, best_cpu, best_sibling_cpu;
-	int min_cost, best_sibling_cpu_cost;
-	int best_cpu_wakeup_latency;
-	u64 min_load, best_load, best_sibling_cpu_load;
-	s64 highest_spare_capacity;
-};
-
-static int spill_threshold_crossed(struct cpu_select_env *env, struct rq *rq)
-{
-	u64 total_load;
-
-	total_load = env->task_load + env->cpu_load;
-
-	if (total_load > sched_spill_load ||
-	    (rq->nr_running + 1) > sysctl_sched_spill_nr_run)
-		return 1;
-
-	return 0;
-}
-
-static int skip_cpu(int cpu, struct cpu_select_env *env)
-{
-	int tcpu = task_cpu(env->p);
-	int skip = 0;
-
-	if (!env->reason)
-		return 0;
-
-	if (is_reserved(cpu))
-		return 1;
-
-	switch (env->reason) {
-	case UP_MIGRATION:
-		skip = !idle_cpu(cpu);
-		break;
-	case IRQLOAD_MIGRATION:
-		/* Purposely fall through */
-	default:
-		skip = (cpu == tcpu);
-		break;
-	}
-
-	return skip;
-}
-
-static inline int
-acceptable_capacity(struct sched_cluster *cluster, struct cpu_select_env *env)
-{
-	int tcpu;
-
-	if (!env->reason)
-		return 1;
-
-	tcpu = task_cpu(env->p);
-	switch (env->reason) {
-	case UP_MIGRATION:
-		return cluster->capacity > cpu_capacity(tcpu);
-
-	case DOWN_MIGRATION:
-		return cluster->capacity < cpu_capacity(tcpu);
-
-	default:
-		break;
-	}
-
-	return 1;
-}
-
-static int
-skip_cluster(struct sched_cluster *cluster, struct cpu_select_env *env)
-{
-	if (!test_bit(cluster->id, env->candidate_list))
-		return 1;
-
-	if (!acceptable_capacity(cluster, env)) {
-		__clear_bit(cluster->id, env->candidate_list);
-		return 1;
-	}
-
-	return 0;
-}
-
-static struct sched_cluster *
-select_least_power_cluster(struct cpu_select_env *env)
-{
-	struct sched_cluster *cluster;
-
-	if (env->rtg) {
-		int cpu = cluster_first_cpu(env->rtg->preferred_cluster);
-
-		env->task_load = scale_load_to_cpu(task_load(env->p), cpu);
-
-		if (task_load_will_fit(env->p, env->task_load,
-					cpu, env->boost_policy)) {
-			env->sbc_best_cluster_flag |= SBC_FLAG_COLOC_CLUSTER;
-
-			if (env->boost_policy == SCHED_BOOST_NONE)
-				return env->rtg->preferred_cluster;
-
-			for_each_sched_cluster(cluster) {
-				if (cluster != env->rtg->preferred_cluster) {
-					__set_bit(cluster->id,
-						env->backup_list);
-					__clear_bit(cluster->id,
-						env->candidate_list);
-				}
-			}
-
-			return env->rtg->preferred_cluster;
-		}
-
-		/*
-		 * Since the task load does not fit on the preferred
-		 * cluster anymore, pretend that the task does not
-		 * have any preferred cluster. This allows the waking
-		 * task to get the appropriate CPU it needs as per the
-		 * non co-location placement policy without having to
-		 * wait until the preferred cluster is updated.
-		 */
-		env->rtg = NULL;
-	}
-
-	for_each_sched_cluster(cluster) {
-		if (!skip_cluster(cluster, env)) {
-			int cpu = cluster_first_cpu(cluster);
-
-			env->task_load = scale_load_to_cpu(task_load(env->p),
-									 cpu);
-			if (task_load_will_fit(env->p, env->task_load, cpu,
-					       env->boost_policy))
-				return cluster;
-
-			__set_bit(cluster->id, env->backup_list);
-			__clear_bit(cluster->id, env->candidate_list);
-		}
-	}
-
-	return NULL;
-}
-
-static struct sched_cluster *
-next_candidate(const unsigned long *list, int start, int end)
-{
-	int cluster_id;
-
-	cluster_id = find_next_bit(list, end, start - 1 + 1);
-	if (cluster_id >= end)
-		return NULL;
-
-	return sched_cluster[cluster_id];
-}
-
-static void
-update_spare_capacity(struct cluster_cpu_stats *stats,
-		      struct cpu_select_env *env, int cpu, int capacity,
-		      u64 cpu_load)
-{
-	s64 spare_capacity = sched_ravg_window - cpu_load;
-
-	if (spare_capacity > 0 &&
-	    (spare_capacity > stats->highest_spare_capacity ||
-	     (spare_capacity == stats->highest_spare_capacity &&
-	      ((!env->need_waker_cluster &&
-		capacity > cpu_capacity(stats->best_capacity_cpu)) ||
-	       (env->need_waker_cluster &&
-		cpu_rq(cpu)->nr_running <
-		cpu_rq(stats->best_capacity_cpu)->nr_running))))) {
-		/*
-		 * If sync waker is the only runnable of CPU, cr_avg of the
-		 * CPU is 0 so we have high chance to place the wakee on the
-		 * waker's CPU which likely causes preemtion of the waker.
-		 * This can lead migration of preempted waker.  Place the
-		 * wakee on the real idle CPU when it's possible by checking
-		 * nr_running to avoid such preemption.
-		 */
-		stats->highest_spare_capacity = spare_capacity;
-		stats->best_capacity_cpu = cpu;
-	}
-}
-
-static inline void find_backup_cluster(
-struct cpu_select_env *env, struct cluster_cpu_stats *stats)
-{
-	struct sched_cluster *next = NULL;
-	int i;
-	struct cpumask search_cpus;
-
-	while (!bitmap_empty(env->backup_list, num_clusters)) {
-		next = next_candidate(env->backup_list, 0, num_clusters);
-		__clear_bit(next->id, env->backup_list);
-
-		cpumask_and(&search_cpus, &env->search_cpus, &next->cpus);
-		for_each_cpu(i, &search_cpus) {
-			trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
-			sched_irqload(i), power_cost(i, task_load(env->p) +
-					cpu_cravg_sync(i, env->sync)), 0);
-
-			update_spare_capacity(stats, env, i, next->capacity,
-					  cpu_load_sync(i, env->sync));
-		}
-		env->sbc_best_cluster_flag = SBC_FLAG_BACKUP_CLUSTER;
-	}
-}
-
-struct sched_cluster *
-next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
-					struct cluster_cpu_stats *stats)
-{
-	struct sched_cluster *next = NULL;
-
-	__clear_bit(cluster->id, env->candidate_list);
-
-	if (env->rtg && preferred_cluster(cluster, env->p))
-		return NULL;
-
-	do {
-		if (bitmap_empty(env->candidate_list, num_clusters))
-			return NULL;
-
-		next = next_candidate(env->candidate_list, 0, num_clusters);
-		if (next) {
-			if (next->min_power_cost > stats->min_cost) {
-				clear_bit(next->id, env->candidate_list);
-				next = NULL;
-				continue;
-			}
-
-			if (skip_cluster(next, env))
-				next = NULL;
-		}
-	} while (!next);
-
-	env->task_load = scale_load_to_cpu(task_load(env->p),
-					cluster_first_cpu(next));
-	return next;
-}
-
-#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE
-static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
-				   struct cpu_select_env *env, int cpu_cost)
-{
-	int wakeup_latency;
-	int prev_cpu = env->prev_cpu;
-
-	wakeup_latency = cpu_rq(cpu)->wakeup_latency;
-
-	if (env->need_idle) {
-		stats->min_cost = cpu_cost;
-		if (idle_cpu(cpu)) {
-			if (wakeup_latency < stats->best_cpu_wakeup_latency ||
-			    (wakeup_latency == stats->best_cpu_wakeup_latency &&
-			     cpu == prev_cpu)) {
-				stats->best_idle_cpu = cpu;
-				stats->best_cpu_wakeup_latency = wakeup_latency;
-			}
-		} else {
-			if (env->cpu_load < stats->min_load ||
-				(env->cpu_load == stats->min_load &&
-							cpu == prev_cpu)) {
-				stats->least_loaded_cpu = cpu;
-				stats->min_load = env->cpu_load;
-			}
-		}
-
-		return;
-	}
-
-	if (cpu_cost < stats->min_cost)  {
-		stats->min_cost = cpu_cost;
-		stats->best_cpu_wakeup_latency = wakeup_latency;
-		stats->best_load = env->cpu_load;
-		stats->best_cpu = cpu;
-		env->sbc_best_flag = SBC_FLAG_CPU_COST;
-		return;
-	}
-
-	/* CPU cost is the same. Start breaking the tie by C-state */
-
-	if (wakeup_latency > stats->best_cpu_wakeup_latency)
-		return;
-
-	if (wakeup_latency < stats->best_cpu_wakeup_latency) {
-		stats->best_cpu_wakeup_latency = wakeup_latency;
-		stats->best_load = env->cpu_load;
-		stats->best_cpu = cpu;
-		env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
-		return;
-	}
-
-	/* C-state is the same. Use prev CPU to break the tie */
-	if (cpu == prev_cpu) {
-		stats->best_cpu = cpu;
-		env->sbc_best_flag = SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER;
-		return;
-	}
-
-	if (stats->best_cpu != prev_cpu &&
-	    ((wakeup_latency == 0 && env->cpu_load < stats->best_load) ||
-	    (wakeup_latency > 0 && env->cpu_load > stats->best_load))) {
-		stats->best_load = env->cpu_load;
-		stats->best_cpu = cpu;
-		env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
-	}
-}
-#else /* CONFIG_SCHED_HMP_CSTATE_AWARE */
-static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
-				   struct cpu_select_env *env, int cpu_cost)
-{
-	int prev_cpu = env->prev_cpu;
-
-	if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
-		if (stats->best_sibling_cpu_cost > cpu_cost ||
-		    (stats->best_sibling_cpu_cost == cpu_cost &&
-		     stats->best_sibling_cpu_load > env->cpu_load)) {
-			stats->best_sibling_cpu_cost = cpu_cost;
-			stats->best_sibling_cpu_load = env->cpu_load;
-			stats->best_sibling_cpu = cpu;
-		}
-	}
-
-	if ((cpu_cost < stats->min_cost) ||
-	    ((stats->best_cpu != prev_cpu &&
-	      stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
-		if (env->need_idle) {
-			if (idle_cpu(cpu)) {
-				stats->min_cost = cpu_cost;
-				stats->best_idle_cpu = cpu;
-			}
-		} else {
-			stats->min_cost = cpu_cost;
-			stats->min_load = env->cpu_load;
-			stats->best_cpu = cpu;
-			env->sbc_best_flag = SBC_FLAG_MIN_COST;
-		}
-	}
-}
-#endif /* CONFIG_SCHED_HMP_CSTATE_AWARE */
-
-static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
-					 struct cpu_select_env *env)
-{
-	int cpu_cost;
-
-	/*
-	 * We try to find the least loaded *busy* CPU irrespective
-	 * of the power cost.
-	 */
-	if (env->pack_task)
-		cpu_cost = cpu_min_power_cost(cpu);
-
-	else
-		cpu_cost = power_cost(cpu, task_load(env->p) +
-				cpu_cravg_sync(cpu, env->sync));
-
-	if (cpu_cost <= stats->min_cost)
-		__update_cluster_stats(cpu, stats, env, cpu_cost);
-}
-
-static void find_best_cpu_in_cluster(struct sched_cluster *c,
-	 struct cpu_select_env *env, struct cluster_cpu_stats *stats)
-{
-	int i;
-	struct cpumask search_cpus;
-
-	cpumask_and(&search_cpus, &env->search_cpus, &c->cpus);
-
-	env->need_idle = wake_to_idle(env->p) || c->wake_up_idle;
-
-	for_each_cpu(i, &search_cpus) {
-		env->cpu_load = cpu_load_sync(i, env->sync);
-
-		trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
-			sched_irqload(i),
-			power_cost(i, task_load(env->p) +
-					cpu_cravg_sync(i, env->sync)), 0);
-
-		if (skip_cpu(i, env))
-			continue;
-
-		update_spare_capacity(stats, env, i, c->capacity,
-				      env->cpu_load);
-
-		/*
-		 * need_idle takes precedence over sched boost but when both
-		 * are set, idlest CPU with in all the clusters is selected
-		 * when boost_policy = BOOST_ON_ALL whereas idlest CPU in the
-		 * big cluster is selected within boost_policy = BOOST_ON_BIG.
-		 */
-		if ((!env->need_idle &&
-		    env->boost_policy != SCHED_BOOST_NONE) ||
-		    env->need_waker_cluster ||
-		    sched_cpu_high_irqload(i) ||
-		    spill_threshold_crossed(env, cpu_rq(i)))
-			continue;
-
-		update_cluster_stats(i, stats, env);
-	}
-}
-
-static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
-{
-	stats->best_cpu = stats->best_idle_cpu = -1;
-	stats->best_capacity_cpu = stats->best_sibling_cpu  = -1;
-	stats->min_cost = stats->best_sibling_cpu_cost = INT_MAX;
-	stats->min_load	= stats->best_sibling_cpu_load = ULLONG_MAX;
-	stats->highest_spare_capacity = 0;
-	stats->least_loaded_cpu = -1;
-	stats->best_cpu_wakeup_latency = INT_MAX;
-	/* No need to initialize stats->best_load */
-}
-
-static inline bool env_has_special_flags(struct cpu_select_env *env)
-{
-	if (env->need_idle || env->boost_policy != SCHED_BOOST_NONE ||
-	    env->reason)
-		return true;
-
-	return false;
-}
-
-static inline bool
-bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
-{
-	int prev_cpu;
-	struct task_struct *task = env->p;
-	struct sched_cluster *cluster;
-
-	if (!task->ravg.mark_start || !sched_short_sleep_task_threshold)
-		return false;
-
-	prev_cpu = env->prev_cpu;
-	if (!cpumask_test_cpu(prev_cpu, &env->search_cpus))
-		return false;
-
-	if (task->ravg.mark_start - task->last_cpu_selected_ts >=
-				sched_long_cpu_selection_threshold)
-		return false;
-
-	/*
-	 * This function should be used by task wake up path only as it's
-	 * assuming p->last_switch_out_ts as last sleep time.
-	 * p->last_switch_out_ts can denote last preemption time as well as
-	 * last sleep time.
-	 */
-	if (task->ravg.mark_start - task->last_switch_out_ts >=
-					sched_short_sleep_task_threshold)
-		return false;
-
-	env->task_load = scale_load_to_cpu(task_load(task), prev_cpu);
-	cluster = cpu_rq(prev_cpu)->cluster;
-
-	if (!task_load_will_fit(task, env->task_load, prev_cpu,
-				sched_boost_policy())) {
-
-		__set_bit(cluster->id, env->backup_list);
-		__clear_bit(cluster->id, env->candidate_list);
-		return false;
-	}
-
-	env->cpu_load = cpu_load_sync(prev_cpu, env->sync);
-	if (sched_cpu_high_irqload(prev_cpu) ||
-			spill_threshold_crossed(env, cpu_rq(prev_cpu))) {
-		update_spare_capacity(stats, env, prev_cpu,
-				cluster->capacity, env->cpu_load);
-		cpumask_clear_cpu(prev_cpu, &env->search_cpus);
-		return false;
-	}
-
-	return true;
-}
-
-static inline bool
-wake_to_waker_cluster(struct cpu_select_env *env)
-{
-	return env->sync &&
-	       task_load(current) > sched_big_waker_task_load &&
-	       task_load(env->p) < sched_small_wakee_task_load;
-}
-
-static inline bool
-bias_to_waker_cpu(struct cpu_select_env *env, int cpu)
-{
-	return sysctl_sched_prefer_sync_wakee_to_waker &&
-	       cpu_rq(cpu)->nr_running == 1 &&
-	       cpumask_test_cpu(cpu, &env->search_cpus);
-}
-
-static inline int
-cluster_allowed(struct cpu_select_env *env, struct sched_cluster *cluster)
-{
-	return cpumask_intersects(&env->search_cpus, &cluster->cpus);
-}
-
-/* return cheapest cpu that can fit this task */
-static int select_best_cpu(struct task_struct *p, int target, int reason,
-			   int sync)
-{
-	struct sched_cluster *cluster, *pref_cluster = NULL;
-	struct cluster_cpu_stats stats;
-	struct related_thread_group *grp;
-	unsigned int sbc_flag = 0;
-	int cpu = raw_smp_processor_id();
-	bool special;
-
-	struct cpu_select_env env = {
-		.p			= p,
-		.reason			= reason,
-		.need_idle		= wake_to_idle(p),
-		.need_waker_cluster	= 0,
-		.sync			= sync,
-		.prev_cpu		= target,
-		.rtg			= NULL,
-		.sbc_best_flag		= 0,
-		.sbc_best_cluster_flag	= 0,
-		.pack_task              = false,
-	};
-
-	env.boost_policy = task_sched_boost(p) ?
-			sched_boost_policy() : SCHED_BOOST_NONE;
-
-	bitmap_copy(env.candidate_list, all_cluster_ids, NR_CPUS);
-	bitmap_zero(env.backup_list, NR_CPUS);
-
-	cpumask_and(&env.search_cpus, tsk_cpus_allowed(p), cpu_active_mask);
-	cpumask_andnot(&env.search_cpus, &env.search_cpus, cpu_isolated_mask);
-
-	init_cluster_cpu_stats(&stats);
-	special = env_has_special_flags(&env);
-
-	rcu_read_lock();
-
-	grp = task_related_thread_group(p);
-
-	if (grp && grp->preferred_cluster) {
-		pref_cluster = grp->preferred_cluster;
-		if (!cluster_allowed(&env, pref_cluster))
-			clear_bit(pref_cluster->id, env.candidate_list);
-		else
-			env.rtg = grp;
-	} else if (!special) {
-		cluster = cpu_rq(cpu)->cluster;
-		if (wake_to_waker_cluster(&env)) {
-			if (bias_to_waker_cpu(&env, cpu)) {
-				target = cpu;
-				sbc_flag = SBC_FLAG_WAKER_CLUSTER |
-					   SBC_FLAG_WAKER_CPU;
-				goto out;
-			} else if (cluster_allowed(&env, cluster)) {
-				env.need_waker_cluster = 1;
-				bitmap_zero(env.candidate_list, NR_CPUS);
-				__set_bit(cluster->id, env.candidate_list);
-				env.sbc_best_cluster_flag =
-							SBC_FLAG_WAKER_CLUSTER;
-			}
-		} else if (bias_to_prev_cpu(&env, &stats)) {
-			sbc_flag = SBC_FLAG_PREV_CPU;
-			goto out;
-		}
-	}
-
-	if (!special && is_short_burst_task(p)) {
-		env.pack_task = true;
-		sbc_flag = SBC_FLAG_PACK_TASK;
-	}
-retry:
-	cluster = select_least_power_cluster(&env);
-
-	if (!cluster)
-		goto out;
-
-	/*
-	 * 'cluster' now points to the minimum power cluster which can satisfy
-	 * task's perf goals. Walk down the cluster list starting with that
-	 * cluster. For non-small tasks, skip clusters that don't have
-	 * mostly_idle/idle cpus
-	 */
-
-	do {
-		find_best_cpu_in_cluster(cluster, &env, &stats);
-
-	} while ((cluster = next_best_cluster(cluster, &env, &stats)));
-
-	if (env.need_idle) {
-		if (stats.best_idle_cpu >= 0) {
-			target = stats.best_idle_cpu;
-			sbc_flag |= SBC_FLAG_IDLE_CSTATE;
-		} else if (stats.least_loaded_cpu >= 0) {
-			target = stats.least_loaded_cpu;
-			sbc_flag |= SBC_FLAG_IDLE_LEAST_LOADED;
-		}
-	} else if (stats.best_cpu >= 0) {
-		if (stats.best_cpu != task_cpu(p) &&
-				stats.min_cost == stats.best_sibling_cpu_cost) {
-			stats.best_cpu = stats.best_sibling_cpu;
-			sbc_flag |= SBC_FLAG_BEST_SIBLING;
-		}
-		sbc_flag |= env.sbc_best_flag;
-		target = stats.best_cpu;
-	} else {
-		if (env.rtg && env.boost_policy == SCHED_BOOST_NONE) {
-			env.rtg = NULL;
-			goto retry;
-		}
-
-		/*
-		 * With boost_policy == SCHED_BOOST_ON_BIG, we reach here with
-		 * backup_list = little cluster, candidate_list = none and
-		 * stats->best_capacity_cpu points the best spare capacity
-		 * CPU among the CPUs in the big cluster.
-		 */
-		if (env.boost_policy == SCHED_BOOST_ON_BIG &&
-		    stats.best_capacity_cpu >= 0)
-			sbc_flag |= SBC_FLAG_BOOST_CLUSTER;
-		else
-			find_backup_cluster(&env, &stats);
-
-		if (stats.best_capacity_cpu >= 0) {
-			target = stats.best_capacity_cpu;
-			sbc_flag |= SBC_FLAG_BEST_CAP_CPU;
-		}
-	}
-	p->last_cpu_selected_ts = sched_ktime_clock();
-out:
-	sbc_flag |= env.sbc_best_cluster_flag;
-	rcu_read_unlock();
-	trace_sched_task_load(p, sched_boost_policy() && task_sched_boost(p),
-		env.reason, env.sync, env.need_idle, sbc_flag, target);
-	return target;
-}
-
-/*
- * Reset balance_interval at all sched_domain levels of given cpu, so that it
- * honors kick.
- */
-static inline void reset_balance_interval(int cpu)
-{
-	struct sched_domain *sd;
-
-	if (cpu >= nr_cpu_ids)
-		return;
-
-	rcu_read_lock();
-	for_each_domain(cpu, sd)
-		sd->balance_interval = 0;
-	rcu_read_unlock();
-}
-
-/*
- * Check if a task is on the "wrong" cpu (i.e its current cpu is not the ideal
- * cpu as per its demand or priority)
- *
- * Returns reason why task needs to be migrated
- */
-static inline int migration_needed(struct task_struct *p, int cpu)
-{
-	int nice;
-	struct related_thread_group *grp;
-
-	if (p->state != TASK_RUNNING || p->nr_cpus_allowed == 1)
-		return 0;
-
-	/* No need to migrate task that is about to be throttled */
-	if (task_will_be_throttled(p))
-		return 0;
-
-	if (sched_boost_policy() == SCHED_BOOST_ON_BIG &&
-		 cpu_capacity(cpu) != max_capacity && task_sched_boost(p))
-		return UP_MIGRATION;
-
-	if (sched_cpu_high_irqload(cpu))
-		return IRQLOAD_MIGRATION;
-
-	nice = task_nice(p);
-	rcu_read_lock();
-	grp = task_related_thread_group(p);
-	/*
-	 * Don't assume higher capacity means higher power. If the task
-	 * is running on the power efficient CPU, avoid migrating it
-	 * to a lower capacity cluster.
-	 */
-	if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE ||
-			upmigrate_discouraged(p)) &&
-			cpu_capacity(cpu) > min_capacity &&
-			cpu_max_power_cost(cpu) == max_power_cost) {
-		rcu_read_unlock();
-		return DOWN_MIGRATION;
-	}
-
-	if (!task_will_fit(p, cpu)) {
-		rcu_read_unlock();
-		return UP_MIGRATION;
-	}
-	rcu_read_unlock();
-
-	return 0;
-}
-
-static DEFINE_RAW_SPINLOCK(migration_lock);
-
-/*
- * Check if currently running task should be migrated to a better cpu.
- *
- * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
- */
-void check_for_migration(struct rq *rq, struct task_struct *p)
-{
-	int cpu = cpu_of(rq), new_cpu;
-	int active_balance = 0, reason;
-
-	reason = migration_needed(p, cpu);
-	if (!reason)
-		return;
-
-	raw_spin_lock(&migration_lock);
-	new_cpu = select_best_cpu(p, cpu, reason, 0);
-
-	if (new_cpu != cpu) {
-		active_balance = kick_active_balance(rq, p, new_cpu);
-		if (active_balance)
-			mark_reserved(new_cpu);
-	}
-
-	raw_spin_unlock(&migration_lock);
-
-	if (active_balance)
-		stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
-					&rq->active_balance_work);
-}
-
-#ifdef CONFIG_CFS_BANDWIDTH
-static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
-{
-	cfs_rq->hmp_stats.nr_big_tasks = 0;
-	cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
-	cfs_rq->hmp_stats.pred_demands_sum = 0;
-}
-
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-		 struct task_struct *p, int change_cra)
-{
-	inc_nr_big_task(&cfs_rq->hmp_stats, p);
-	if (change_cra)
-		inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
-}
-
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-		 struct task_struct *p, int change_cra)
-{
-	dec_nr_big_task(&cfs_rq->hmp_stats, p);
-	if (change_cra)
-		dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
-}
-
-static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
-			 struct cfs_rq *cfs_rq)
-{
-	stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
-	stats->cumulative_runnable_avg +=
-				cfs_rq->hmp_stats.cumulative_runnable_avg;
-	stats->pred_demands_sum += cfs_rq->hmp_stats.pred_demands_sum;
-}
-
-static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
-				 struct cfs_rq *cfs_rq)
-{
-	stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
-	stats->cumulative_runnable_avg -=
-				cfs_rq->hmp_stats.cumulative_runnable_avg;
-	stats->pred_demands_sum -= cfs_rq->hmp_stats.pred_demands_sum;
-
-	BUG_ON(stats->nr_big_tasks < 0 ||
-		(s64)stats->cumulative_runnable_avg < 0);
-	BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-#endif	/* CONFIG_CFS_BANDWIDTH */
-
-static int
-bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
-{
-	int local_cpu, busiest_cpu;
-	int local_capacity, busiest_capacity;
-	int local_pwr_cost, busiest_pwr_cost;
-	int nr_cpus;
-	int boost = sched_boost();
-
-	if (!sysctl_sched_restrict_cluster_spill ||
-		boost == FULL_THROTTLE_BOOST || boost == CONSERVATIVE_BOOST)
-		return 0;
-
-	local_cpu = group_first_cpu(sds->local);
-	busiest_cpu = group_first_cpu(sds->busiest);
-
-	local_capacity = cpu_max_possible_capacity(local_cpu);
-	busiest_capacity = cpu_max_possible_capacity(busiest_cpu);
-
-	local_pwr_cost = cpu_max_power_cost(local_cpu);
-	busiest_pwr_cost = cpu_max_power_cost(busiest_cpu);
-
-	if (local_pwr_cost <= busiest_pwr_cost)
-		return 0;
-
-	if (local_capacity > busiest_capacity &&
-			sds->busiest_stat.sum_nr_big_tasks)
-		return 0;
-
-	nr_cpus = cpumask_weight(sched_group_cpus(sds->busiest));
-	if ((sds->busiest_stat.group_cpu_load < nr_cpus * sched_spill_load) &&
-		(sds->busiest_stat.sum_nr_running <
-			nr_cpus * sysctl_sched_spill_nr_run))
-		return 1;
-
-	return 0;
-}
-
-static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
-						  struct sd_lb_stats *sds,
-						  struct sched_group *sg,
-						  struct sg_lb_stats *sgs)
-{
-	if (env->idle != CPU_NOT_IDLE &&
-	    cpu_capacity(env->dst_cpu) > group_rq_capacity(sg)) {
-		if (sgs->sum_nr_big_tasks >
-				sds->busiest_stat.sum_nr_big_tasks) {
-			env->flags |= LBF_BIG_TASK_ACTIVE_BALANCE;
-			return true;
-		}
-	}
-
-	return false;
-}
-
-static struct rq *find_busiest_queue_hmp(struct lb_env *env,
-				     struct sched_group *group)
-{
-	struct rq *busiest = NULL, *busiest_big = NULL;
-	u64 max_runnable_avg = 0, max_runnable_avg_big = 0;
-	int max_nr_big = 0, nr_big;
-	bool find_big = !!(env->flags & LBF_BIG_TASK_ACTIVE_BALANCE);
-	int i;
-	cpumask_t cpus;
-
-	cpumask_andnot(&cpus, sched_group_cpus(group), cpu_isolated_mask);
-
-	for_each_cpu(i, &cpus) {
-		struct rq *rq = cpu_rq(i);
-		u64 cumulative_runnable_avg =
-				rq->hmp_stats.cumulative_runnable_avg;
-
-		if (!cpumask_test_cpu(i, env->cpus))
-			continue;
-
-
-		if (find_big) {
-			nr_big = nr_big_tasks(rq);
-			if (nr_big > max_nr_big ||
-			    (nr_big > 0 && nr_big == max_nr_big &&
-			     cumulative_runnable_avg > max_runnable_avg_big)) {
-				max_runnable_avg_big = cumulative_runnable_avg;
-				busiest_big = rq;
-				max_nr_big = nr_big;
-				continue;
-			}
-		}
-
-		if (cumulative_runnable_avg > max_runnable_avg) {
-			max_runnable_avg = cumulative_runnable_avg;
-			busiest = rq;
-		}
-	}
-
-	if (busiest_big)
-		return busiest_big;
-
-	env->flags &= ~LBF_BIG_TASK_ACTIVE_BALANCE;
-	return busiest;
-}
-
-#ifdef CONFIG_NO_HZ_COMMON
-static inline int find_new_hmp_ilb(int type)
-{
-	int call_cpu = raw_smp_processor_id();
-	struct sched_domain *sd;
-	int ilb;
-
-	rcu_read_lock();
-
-	/* Pick an idle cpu "closest" to call_cpu */
-	for_each_domain(call_cpu, sd) {
-		for_each_cpu_and(ilb, nohz.idle_cpus_mask,
-						sched_domain_span(sd)) {
-			if (idle_cpu(ilb) && (type != NOHZ_KICK_RESTRICT ||
-					cpu_max_power_cost(ilb) <=
-					cpu_max_power_cost(call_cpu))) {
-				rcu_read_unlock();
-				reset_balance_interval(ilb);
-				return ilb;
-			}
-		}
-	}
-
-	rcu_read_unlock();
-	return nr_cpu_ids;
-}
-
-static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
-{
-	struct sched_domain *sd;
-	int i;
-
-	if (rq->nr_running < 2)
-		return 0;
-
-	if (!sysctl_sched_restrict_cluster_spill ||
-			sched_boost_policy() == SCHED_BOOST_ON_ALL)
-		return 1;
-
-	if (cpu_max_power_cost(cpu) == max_power_cost)
-		return 1;
-
-	rcu_read_lock();
-	sd = rcu_dereference_check_sched_domain(rq->sd);
-	if (!sd) {
-		rcu_read_unlock();
-		return 0;
-	}
-
-	for_each_cpu(i, sched_domain_span(sd)) {
-		if (cpu_load(i) < sched_spill_load &&
-				cpu_rq(i)->nr_running <
-				sysctl_sched_spill_nr_run) {
-			/* Change the kick type to limit to CPUs that
-			 * are of equal or lower capacity.
-			 */
-			*type = NOHZ_KICK_RESTRICT;
-			break;
-		}
-	}
-	rcu_read_unlock();
-	return 1;
-}
-#endif /* CONFIG_NO_HZ_COMMON */
-#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_CFS_BANDWIDTH
-/*
- * Check if task is part of a hierarchy where some cfs_rq does not have any
- * runtime left.
- *
- * We can't rely on throttled_hierarchy() to do this test, as
- * cfs_rq->throttle_count will not be updated yet when this function is called
- * from scheduler_tick()
- */
-static int task_will_be_throttled(struct task_struct *p)
-{
-	struct sched_entity *se = &p->se;
-	struct cfs_rq *cfs_rq;
-
-	if (!cfs_bandwidth_used())
-		return 0;
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		if (!cfs_rq->runtime_enabled)
-			continue;
-		if (cfs_rq->runtime_remaining <= 0)
-			return 1;
-	}
-
-	return 0;
-}
-#endif /* CONFIG_CFS_BANDWIDTH */
-
-#elif defined(CONFIG_SCHED_WALT)
+#if defined(CONFIG_SCHED_WALT)
 
 void check_for_migration(struct rq *rq, struct task_struct *p)
 {
@@ -12314,4 +11145,4 @@
 	}
 }
 
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SCHED_WALT */
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
deleted file mode 100644
index 24b60d7..0000000
--- a/kernel/sched/hmp.c
+++ /dev/null
@@ -1,1639 +0,0 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Implementation credits: Srivatsa Vaddagiri, Steve Muckle
- * Syed Rameez Mustafa, Olav Haugan, Joonwoo Park, Pavan Kumar Kondeti
- * and Vikram Mulukutla
- */
-
-#include <linux/cpufreq.h>
-#include <linux/list_sort.h>
-#include <linux/syscore_ops.h>
-
-#include "sched.h"
-#include "walt.h"
-
-#include <trace/events/sched.h>
-
-#define CSTATE_LATENCY_GRANULARITY_SHIFT (6)
-
-inline void clear_ed_task(struct task_struct *p, struct rq *rq)
-{
-	if (p == rq->ed_task)
-		rq->ed_task = NULL;
-}
-
-inline void set_task_last_switch_out(struct task_struct *p, u64 wallclock)
-{
-	p->last_switch_out_ts = wallclock;
-}
-
-/*
- * Note C-state for (idle) cpus.
- *
- * @cstate = cstate index, 0 -> active state
- * @wakeup_energy = energy spent in waking up cpu
- * @wakeup_latency = latency to wakeup from cstate
- *
- */
-void
-sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	rq->cstate = cstate; /* C1, C2 etc */
-	rq->wakeup_energy = wakeup_energy;
-	/* disregard small latency delta (64 us). */
-	rq->wakeup_latency = ((wakeup_latency >>
-			       CSTATE_LATENCY_GRANULARITY_SHIFT) <<
-			      CSTATE_LATENCY_GRANULARITY_SHIFT);
-}
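
The shift-down/shift-up pair above simply rounds wakeup_latency down to a multiple of 64 us (2^6), so small latency differences between C-states do not keep perturbing placement decisions. A minimal userspace sketch of that rounding, with made-up latency values:

#include <stdio.h>

#define CSTATE_LATENCY_GRANULARITY_SHIFT 6	/* 2^6 = 64 us granularity */

static int quantize_latency(int wakeup_latency_us)
{
	/* Round down to the nearest multiple of 64 us, as the scheduler does. */
	return (wakeup_latency_us >> CSTATE_LATENCY_GRANULARITY_SHIFT) <<
		CSTATE_LATENCY_GRANULARITY_SHIFT;
}

int main(void)
{
	int samples[] = { 40, 64, 100, 130, 700 };	/* hypothetical latencies, us */
	int i;

	for (i = 0; i < 5; i++)
		printf("%d us -> %d us\n", samples[i], quantize_latency(samples[i]));
	return 0;
}

So 100 us and 64 us collapse to the same value, while 130 us quantizes to 128 us.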
-
-/*
- * Note D-state for (idle) cluster.
- *
- * @dstate = dstate index, 0 -> active state
- * @wakeup_energy = energy spent in waking up cluster
- * @wakeup_latency = latency to wakeup from cluster
- *
- */
-void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
-			int wakeup_energy, int wakeup_latency)
-{
-	struct sched_cluster *cluster =
-		cpu_rq(cpumask_first(cluster_cpus))->cluster;
-	cluster->dstate = dstate;
-	cluster->dstate_wakeup_energy = wakeup_energy;
-	cluster->dstate_wakeup_latency = wakeup_latency;
-}
-
-u32 __weak get_freq_max_load(int cpu, u32 freq)
-{
-	/* 100% by default */
-	return 100;
-}
-
-struct freq_max_load_entry {
-	/* The maximum load which has accounted governor's headroom. */
-	u64 hdemand;
-};
-
-struct freq_max_load {
-	struct rcu_head rcu;
-	int length;
-	struct freq_max_load_entry freqs[0];
-};
-
-static DEFINE_PER_CPU(struct freq_max_load *, freq_max_load);
-static DEFINE_SPINLOCK(freq_max_load_lock);
-
-struct cpu_pwr_stats __weak *get_cpu_pwr_stats(void)
-{
-	return NULL;
-}
-
-int sched_update_freq_max_load(const cpumask_t *cpumask)
-{
-	int i, cpu, ret;
-	unsigned int freq;
-	struct cpu_pstate_pwr *costs;
-	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
-	struct freq_max_load *max_load, *old_max_load;
-	struct freq_max_load_entry *entry;
-	u64 max_demand_capacity, max_demand;
-	unsigned long flags;
-	u32 hfreq;
-	int hpct;
-
-	if (!per_cpu_info)
-		return 0;
-
-	spin_lock_irqsave(&freq_max_load_lock, flags);
-	max_demand_capacity = div64_u64(max_task_load(), max_possible_capacity);
-	for_each_cpu(cpu, cpumask) {
-		if (!per_cpu_info[cpu].ptable) {
-			ret = -EINVAL;
-			goto fail;
-		}
-
-		old_max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
-
-		/*
-		 * Allocate len + 1 entries and leave the last power cost as 0
-		 * so that power_cost() can stop iterating when
-		 * per_cpu_info[cpu].len exceeds the length of max_load due to
-		 * a race between cpu power stats updates and get_cpu_pwr_stats().
-		 */
-		max_load = kzalloc(sizeof(struct freq_max_load) +
-				   sizeof(struct freq_max_load_entry) *
-				   (per_cpu_info[cpu].len + 1), GFP_ATOMIC);
-		if (unlikely(!max_load)) {
-			ret = -ENOMEM;
-			goto fail;
-		}
-
-		max_load->length = per_cpu_info[cpu].len;
-
-		max_demand = max_demand_capacity *
-			     cpu_max_possible_capacity(cpu);
-
-		i = 0;
-		costs = per_cpu_info[cpu].ptable;
-		while (costs[i].freq) {
-			entry = &max_load->freqs[i];
-			freq = costs[i].freq;
-			hpct = get_freq_max_load(cpu, freq);
-			if (hpct <= 0 || hpct > 100)
-				hpct = 100;
-			hfreq = div64_u64((u64)freq * hpct, 100);
-			entry->hdemand =
-			    div64_u64(max_demand * hfreq,
-				      cpu_max_possible_freq(cpu));
-			i++;
-		}
-
-		rcu_assign_pointer(per_cpu(freq_max_load, cpu), max_load);
-		if (old_max_load)
-			kfree_rcu(old_max_load, rcu);
-	}
-
-	spin_unlock_irqrestore(&freq_max_load_lock, flags);
-	return 0;
-
-fail:
-	for_each_cpu(cpu, cpumask) {
-		max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
-		if (max_load) {
-			rcu_assign_pointer(per_cpu(freq_max_load, cpu), NULL);
-			kfree_rcu(max_load, rcu);
-		}
-	}
-
-	spin_unlock_irqrestore(&freq_max_load_lock, flags);
-	return ret;
-}
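
Each hdemand entry built above answers "what is the largest window demand this CPU can satisfy at this frequency, once governor headroom is applied". A standalone re-run of that arithmetic with purely illustrative platform numbers (none of these constants or values come from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical platform numbers, for illustration only. */
	uint64_t max_task_load = 20000000;		/* a fully busy window, ns */
	uint64_t max_possible_capacity = 1024;
	uint64_t cpu_max_possible_capacity = 640;	/* a "little" CPU */
	uint64_t cpu_max_possible_freq = 1800000;	/* kHz */
	uint64_t freq = 1100000;			/* one OPP from the table, kHz */
	unsigned int hpct = 80;				/* 20% governor headroom */

	uint64_t max_demand_capacity = max_task_load / max_possible_capacity;
	uint64_t max_demand = max_demand_capacity * cpu_max_possible_capacity;
	uint64_t hfreq = freq * hpct / 100;
	uint64_t hdemand = max_demand * hfreq / cpu_max_possible_freq;

	printf("hdemand at %llu kHz = %llu ns\n",
	       (unsigned long long)freq, (unsigned long long)hdemand);
	return 0;
}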
-
-unsigned long __weak arch_get_cpu_efficiency(int cpu)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-
-int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	rq->static_cpu_pwr_cost = cost;
-	return 0;
-}
-
-unsigned int sched_get_static_cpu_pwr_cost(int cpu)
-{
-	return cpu_rq(cpu)->static_cpu_pwr_cost;
-}
-
-int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost)
-{
-	struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
-
-	cluster->static_cluster_pwr_cost = cost;
-	return 0;
-}
-
-unsigned int sched_get_static_cluster_pwr_cost(int cpu)
-{
-	return cpu_rq(cpu)->cluster->static_cluster_pwr_cost;
-}
-
-int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle)
-{
-	struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
-
-	cluster->wake_up_idle = !!wake_idle;
-	return 0;
-}
-
-unsigned int sched_get_cluster_wake_idle(int cpu)
-{
-	return cpu_rq(cpu)->cluster->wake_up_idle;
-}
-
-/*
- * Tasks that are runnable continuously for a period greater than
- * EARLY_DETECTION_DURATION can be flagged early as potential
- * high load tasks.
- */
-#define EARLY_DETECTION_DURATION 9500000
-
-/*
- * For increase, send notification if
- *      freq_required - cur_freq > sysctl_sched_freq_inc_notify
- */
-__read_mostly int sysctl_sched_freq_inc_notify = 10 * 1024 * 1024; /* + 10GHz */
-
-/*
- * For decrease, send notification if
- *      cur_freq - freq_required > sysctl_sched_freq_dec_notify
- */
-__read_mostly int sysctl_sched_freq_dec_notify = 10 * 1024 * 1024; /* - 10GHz */
-__read_mostly unsigned int sysctl_sched_pred_alert_freq = 10 * 1024 * 1024;
-
-/* Maximum allowed threshold before freq aggregation must be enabled */
-#define MAX_FREQ_AGGR_THRESH 1000
-
-#define for_each_related_thread_group(grp) \
-	list_for_each_entry(grp, &active_related_thread_groups, list)
-
-/* Size of bitmaps maintained to track top tasks */
-static const unsigned int top_tasks_bitmap_size =
-		BITS_TO_LONGS(NUM_LOAD_INDICES + 1) * sizeof(unsigned long);
-
-__read_mostly unsigned int sysctl_sched_freq_aggregate = 1;
-
-/* A cpu can no longer accommodate more tasks if:
- *
- *	rq->nr_running > sysctl_sched_spill_nr_run ||
- *	rq->hmp_stats.cumulative_runnable_avg > sched_spill_load
- */
-unsigned int __read_mostly sysctl_sched_spill_nr_run = 10;
-
-/*
- * Place sync wakee tasks that have less than the configured demand on the
- * waker's cluster.
- */
-unsigned int __read_mostly sched_small_wakee_task_load;
-unsigned int __read_mostly sysctl_sched_small_wakee_task_load_pct = 10;
-
-unsigned int __read_mostly sched_big_waker_task_load;
-unsigned int __read_mostly sysctl_sched_big_waker_task_load_pct = 25;
-
-/*
- * CPUs with load greater than the sched_spill_load_threshold are not
- * eligible for task placement. When all CPUs in a cluster achieve a
- * load higher than this level, tasks become eligible for inter-cluster
- * migration.
- */
-unsigned int __read_mostly sched_spill_load;
-unsigned int __read_mostly sysctl_sched_spill_load_pct = 100;
-
-/*
- * Prefer the waker CPU for sync wakee task, if the CPU has only 1 runnable
- * task. This eliminates the LPM exit latency associated with the idle
- * CPUs in the waker cluster.
- */
-unsigned int __read_mostly sysctl_sched_prefer_sync_wakee_to_waker;
-
-/*
- * Tasks whose bandwidth consumption on a cpu is more than
- * sched_upmigrate are considered "big" tasks. Big tasks will be
- * considered for "up" migration, i.e. migrating to a cpu with better
- * capacity.
- */
-unsigned int __read_mostly sched_upmigrate;
-unsigned int __read_mostly sysctl_sched_upmigrate_pct = 80;
-
-/*
- * Big tasks, once migrated, will need to drop their bandwidth
- * consumption to less than sched_downmigrate before they are "down"
- * migrated.
- */
-unsigned int __read_mostly sched_downmigrate;
-unsigned int __read_mostly sysctl_sched_downmigrate_pct = 60;
-
-/*
- * The load scale factor of a CPU gets boosted when its max frequency
- * is restricted, which causes tasks to migrate to higher capacity
- * CPUs early. The sched_upmigrate threshold is automatically scaled up by
- * rq->max_possible_freq/rq->max_freq of a lower capacity CPU.
- */
-unsigned int up_down_migrate_scale_factor = 1024;
-
-/*
- * Scheduler selects and places task to its previous CPU if sleep time is
- * less than sysctl_sched_select_prev_cpu_us.
- */
-unsigned int __read_mostly
-sched_short_sleep_task_threshold = 2000 * NSEC_PER_USEC;
-
-unsigned int __read_mostly sysctl_sched_select_prev_cpu_us = 2000;
-
-unsigned int __read_mostly
-sched_long_cpu_selection_threshold = 100 * NSEC_PER_MSEC;
-
-unsigned int __read_mostly sysctl_sched_restrict_cluster_spill;
-
-/*
- * Scheduler tries to avoid waking up idle CPUs for tasks running
- * in short bursts. If the task's average burst is less than
- * sysctl_sched_short_burst nanoseconds and it sleeps on average
- * for more than sysctl_sched_short_sleep nanoseconds, then the
- * task is eligible for packing.
- */
-unsigned int __read_mostly sysctl_sched_short_burst;
-unsigned int __read_mostly sysctl_sched_short_sleep = 1 * NSEC_PER_MSEC;
-
-static void _update_up_down_migrate(unsigned int *up_migrate,
-			unsigned int *down_migrate, bool is_group)
-{
-	unsigned int delta;
-
-	if (up_down_migrate_scale_factor == 1024)
-		return;
-
-	delta = *up_migrate - *down_migrate;
-
-	*up_migrate /= NSEC_PER_USEC;
-	*up_migrate *= up_down_migrate_scale_factor;
-	*up_migrate >>= 10;
-	*up_migrate *= NSEC_PER_USEC;
-
-	if (!is_group)
-		*up_migrate = min(*up_migrate, sched_ravg_window);
-
-	*down_migrate /= NSEC_PER_USEC;
-	*down_migrate *= up_down_migrate_scale_factor;
-	*down_migrate >>= 10;
-	*down_migrate *= NSEC_PER_USEC;
-
-	*down_migrate = min(*down_migrate, *up_migrate - delta);
-}
-
-static void update_up_down_migrate(void)
-{
-	unsigned int up_migrate = pct_to_real(sysctl_sched_upmigrate_pct);
-	unsigned int down_migrate = pct_to_real(sysctl_sched_downmigrate_pct);
-
-	_update_up_down_migrate(&up_migrate, &down_migrate, false);
-	sched_upmigrate = up_migrate;
-	sched_downmigrate = down_migrate;
-
-	up_migrate = pct_to_real(sysctl_sched_group_upmigrate_pct);
-	down_migrate = pct_to_real(sysctl_sched_group_downmigrate_pct);
-
-	_update_up_down_migrate(&up_migrate, &down_migrate, true);
-	sched_group_upmigrate = up_migrate;
-	sched_group_downmigrate = down_migrate;
-}
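
When a lower-capacity cluster is frequency-limited, up_down_migrate_scale_factor rises above 1024 and both migration thresholds are raised proportionally, with the down threshold clamped so the original hysteresis gap is preserved. A userspace re-run of _update_up_down_migrate() for the non-group case, assuming a 20 ms window, the default 80/60 percentages and a hypothetical scale factor of 1280:

#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
	unsigned long long window = 20000000ULL;	/* assumed 20 ms ravg window */
	unsigned long long up = window * 80 / 100;	/* sched_upmigrate */
	unsigned long long down = window * 60 / 100;	/* sched_downmigrate */
	unsigned int scale = 1280;			/* 1024 * max_possible_freq / max_freq */
	unsigned long long delta = up - down;		/* hysteresis gap to preserve */

	/* Same integer sequence as the kernel code: scale in usec resolution. */
	up = (up / NSEC_PER_USEC * scale >> 10) * NSEC_PER_USEC;
	if (up > window)
		up = window;

	down = (down / NSEC_PER_USEC * scale >> 10) * NSEC_PER_USEC;
	if (down > up - delta)
		down = up - delta;

	printf("upmigrate=%llu ns downmigrate=%llu ns\n", up, down);
	return 0;
}

With these numbers the up threshold saturates at the window size (20 ms) and the down threshold lands at 15 ms, keeping at least the original 4 ms gap.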
-
-void set_hmp_defaults(void)
-{
-	sched_spill_load =
-		pct_to_real(sysctl_sched_spill_load_pct);
-
-	update_up_down_migrate();
-
-	sched_init_task_load_windows =
-		div64_u64((u64)sysctl_sched_init_task_load_pct *
-			  (u64)sched_ravg_window, 100);
-
-	sched_short_sleep_task_threshold = sysctl_sched_select_prev_cpu_us *
-					   NSEC_PER_USEC;
-
-	sched_small_wakee_task_load =
-		div64_u64((u64)sysctl_sched_small_wakee_task_load_pct *
-			  (u64)sched_ravg_window, 100);
-
-	sched_big_waker_task_load =
-		div64_u64((u64)sysctl_sched_big_waker_task_load_pct *
-			  (u64)sched_ravg_window, 100);
-
-	sched_freq_aggregate_threshold =
-		pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
-}
-
-#ifdef CONFIG_CGROUP_SCHED
-
-int upmigrate_discouraged(struct task_struct *p)
-{
-	return task_group(p)->upmigrate_discouraged;
-}
-
-#else
-
-static inline int upmigrate_discouraged(struct task_struct *p)
-{
-	return 0;
-}
-
-#endif
-
-/* Is a task "big" on its current cpu */
-static inline int __is_big_task(struct task_struct *p, u64 scaled_load)
-{
-	int nice = task_nice(p);
-
-	if (nice > SCHED_UPMIGRATE_MIN_NICE || upmigrate_discouraged(p))
-		return 0;
-
-	return scaled_load > sched_upmigrate;
-}
-
-int is_big_task(struct task_struct *p)
-{
-	return __is_big_task(p, scale_load_to_cpu(task_load(p), task_cpu(p)));
-}
-
-u64 cpu_load(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	return scale_load_to_cpu(rq->hmp_stats.cumulative_runnable_avg, cpu);
-}
-
-u64 cpu_load_sync(int cpu, int sync)
-{
-	return scale_load_to_cpu(cpu_cravg_sync(cpu, sync), cpu);
-}
-
-/*
- * A task will fit on a cpu if its bandwidth consumption on that cpu
- * will be less than sched_upmigrate. A big task that was previously
- * "up" migrated will be considered fitting on a "little" cpu if its
- * bandwidth consumption on the "little" cpu will be less than
- * sched_downmigrate. This helps avoid frequent migrations for
- * tasks with load close to the upmigrate threshold.
- */
-int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
-			      enum sched_boost_policy boost_policy)
-{
-	int upmigrate = sched_upmigrate;
-
-	if (cpu_capacity(cpu) == max_capacity)
-		return 1;
-
-	if (cpu_capacity(task_cpu(p)) > cpu_capacity(cpu))
-		upmigrate = sched_downmigrate;
-
-	if (boost_policy != SCHED_BOOST_ON_BIG) {
-		if (task_nice(p) > SCHED_UPMIGRATE_MIN_NICE ||
-		    upmigrate_discouraged(p))
-			return 1;
-
-		if (task_load < upmigrate)
-			return 1;
-	} else {
-		if (task_sched_boost(p) || task_load >= upmigrate)
-			return 0;
-
-		return 1;
-	}
-
-	return 0;
-}
-
-int task_will_fit(struct task_struct *p, int cpu)
-{
-	u64 tload = scale_load_to_cpu(task_load(p), cpu);
-
-	return task_load_will_fit(p, tload, cpu, sched_boost_policy());
-}
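
The asymmetric thresholds above give placement hysteresis: a task has to reach sched_upmigrate before it stops fitting on a little CPU, but once it is running on a bigger CPU it must fall below the lower sched_downmigrate before it fits on a little CPU again. A reduced sketch of just that decision, with the nice/boost checks stripped out and hypothetical thresholds passed in directly:

#include <stdbool.h>
#include <stdio.h>

/* Does a task with this scaled load fit on the target CPU? */
static bool load_fits(unsigned long long task_load, bool moving_to_smaller_cpu,
		      unsigned long long upmigrate, unsigned long long downmigrate)
{
	unsigned long long threshold =
		moving_to_smaller_cpu ? downmigrate : upmigrate;

	return task_load < threshold;
}

int main(void)
{
	/* Hypothetical thresholds and load, in ns per window. */
	unsigned long long up = 16000000, down = 12000000, load = 14000000;

	printf("fits when moving big -> little: %d\n",
	       load_fits(load, true, up, down));
	printf("fits when already on a little:  %d\n",
	       load_fits(load, false, up, down));
	return 0;
}

The same 14 ms load is refused on a downward move (it is above 12 ms) but accepted in place (it is below 16 ms), which is exactly what damps ping-pong migrations.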
-
-/*
- * Return the cost of running task p on CPU cpu. This function
- * currently assumes that task p is the only task which will run on
- * the CPU.
- */
-unsigned int power_cost(int cpu, u64 demand)
-{
-	int first, mid, last;
-	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
-	struct cpu_pstate_pwr *costs;
-	struct freq_max_load *max_load;
-	int total_static_pwr_cost = 0;
-	struct rq *rq = cpu_rq(cpu);
-	unsigned int pc;
-
-	if (!per_cpu_info || !per_cpu_info[cpu].ptable)
-		/*
-		 * When power aware scheduling is not in use, or CPU
-		 * power data is not available, just use the CPU
-		 * capacity as a rough stand-in for real CPU power
-		 * numbers, assuming bigger CPUs are more power
-		 * hungry.
-		 */
-		return cpu_max_possible_capacity(cpu);
-
-	rcu_read_lock();
-	max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
-	if (!max_load) {
-		pc = cpu_max_possible_capacity(cpu);
-		goto unlock;
-	}
-
-	costs = per_cpu_info[cpu].ptable;
-
-	if (demand <= max_load->freqs[0].hdemand) {
-		pc = costs[0].power;
-		goto unlock;
-	} else if (demand > max_load->freqs[max_load->length - 1].hdemand) {
-		pc = costs[max_load->length - 1].power;
-		goto unlock;
-	}
-
-	first = 0;
-	last = max_load->length - 1;
-	mid = (last - first) >> 1;
-	while (1) {
-		if (demand <= max_load->freqs[mid].hdemand)
-			last = mid;
-		else
-			first = mid;
-
-		if (last - first == 1)
-			break;
-		mid = first + ((last - first) >> 1);
-	}
-
-	pc = costs[last].power;
-
-unlock:
-	rcu_read_unlock();
-
-	if (idle_cpu(cpu) && rq->cstate) {
-		total_static_pwr_cost += rq->static_cpu_pwr_cost;
-		if (rq->cluster->dstate)
-			total_static_pwr_cost +=
-				rq->cluster->static_cluster_pwr_cost;
-	}
-
-	return pc + total_static_pwr_cost;
-
-}
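
Between the two boundary checks, power_cost() is a plain binary search over the monotonically increasing hdemand table, returning the power of the first entry whose hdemand covers the demand. The same loop shape in isolation, over an invented table:

#include <stdio.h>

int main(void)
{
	/* Hypothetical (hdemand, power) table, sorted by hdemand. */
	unsigned long long hdemand[] = { 100, 250, 400, 700, 1000 };
	unsigned int power[] = { 30, 55, 90, 160, 240 };
	int len = 5;
	unsigned long long demand = 500;	/* already known to fall inside the table */
	int first = 0, last = len - 1, mid;

	while (1) {
		mid = first + ((last - first) >> 1);
		if (demand <= hdemand[mid])
			last = mid;	/* covering entry is at or below mid */
		else
			first = mid;	/* covering entry is above mid */
		if (last - first == 1)
			break;
	}

	printf("demand %llu -> power %u\n", demand, power[last]);
	return 0;
}

A demand of 500 is not covered by the 400 entry but is by the 700 one, so the search converges on index 3 and reports its power.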
-
-struct sched_cluster *rq_cluster(struct rq *rq)
-{
-	return rq->cluster;
-}
-
-/*
- * reset_cpu_hmp_stats - reset HMP stats for a cpu
- *	nr_big_tasks
- *	cumulative_runnable_avg (iff reset_cra is true)
- */
-void reset_cpu_hmp_stats(int cpu, int reset_cra)
-{
-	reset_cfs_rq_hmp_stats(cpu, reset_cra);
-	reset_hmp_stats(&cpu_rq(cpu)->hmp_stats, reset_cra);
-}
-
-void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
-				struct task_struct *p, s64 delta)
-{
-	u64 new_task_load;
-	u64 old_task_load;
-
-	if (sched_disable_window_stats)
-		return;
-
-	old_task_load = scale_load_to_cpu(task_load(p), task_cpu(p));
-	new_task_load = scale_load_to_cpu(delta + task_load(p), task_cpu(p));
-
-	if (__is_big_task(p, old_task_load) && !__is_big_task(p, new_task_load))
-		stats->nr_big_tasks--;
-	else if (!__is_big_task(p, old_task_load) &&
-		 __is_big_task(p, new_task_load))
-		stats->nr_big_tasks++;
-
-	BUG_ON(stats->nr_big_tasks < 0);
-}
-
-/*
- * Walk runqueue of cpu and re-initialize 'nr_big_tasks' counters.
- */
-static void update_nr_big_tasks(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-	struct task_struct *p;
-
-	/* Do not reset cumulative_runnable_avg */
-	reset_cpu_hmp_stats(cpu, 0);
-
-	list_for_each_entry(p, &rq->cfs_tasks, se.group_node)
-		inc_hmp_sched_stats_fair(rq, p, 0);
-}
-
-/* Disable interrupts and grab runqueue lock of all cpus listed in @cpus */
-void pre_big_task_count_change(const struct cpumask *cpus)
-{
-	int i;
-
-	local_irq_disable();
-
-	for_each_cpu(i, cpus)
-		raw_spin_lock(&cpu_rq(i)->lock);
-}
-
-/*
- * Reinitialize 'nr_big_tasks' counters on all affected cpus
- */
-void post_big_task_count_change(const struct cpumask *cpus)
-{
-	int i;
-
-	/* Assumes local_irq_disable() keeps online cpumap stable */
-	for_each_cpu(i, cpus)
-		update_nr_big_tasks(i);
-
-	for_each_cpu(i, cpus)
-		raw_spin_unlock(&cpu_rq(i)->lock);
-
-	local_irq_enable();
-}
-
-static inline int invalid_value_freq_input(unsigned int *data)
-{
-	if (data == &sysctl_sched_freq_aggregate)
-		return !(*data == 0 || *data == 1);
-
-	return 0;
-}
-
-static inline int invalid_value(unsigned int *data)
-{
-	unsigned int val = *data;
-
-	if (data == &sysctl_sched_ravg_hist_size)
-		return (val < 2 || val > RAVG_HIST_SIZE_MAX);
-
-	if (data == &sysctl_sched_window_stats_policy)
-		return val >= WINDOW_STATS_INVALID_POLICY;
-
-	return invalid_value_freq_input(data);
-}
-
-/*
- * Handle "atomic" update of sysctl_sched_window_stats_policy,
- * sysctl_sched_ravg_hist_size variables.
- */
-int sched_window_update_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos)
-{
-	int ret;
-	unsigned int *data = (unsigned int *)table->data;
-	unsigned int old_val;
-
-	mutex_lock(&policy_mutex);
-
-	old_val = *data;
-
-	ret = proc_dointvec(table, write, buffer, lenp, ppos);
-	if (ret || !write || (write && (old_val == *data)))
-		goto done;
-
-	if (invalid_value(data)) {
-		*data = old_val;
-		ret = -EINVAL;
-		goto done;
-	}
-
-	reset_all_window_stats(0, 0);
-
-done:
-	mutex_unlock(&policy_mutex);
-
-	return ret;
-}
-
-/*
- * Convert percentage values into absolute form. This avoids a div() operation
- * in the fast path when converting task load to a percentage scale.
- */
-int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos)
-{
-	int ret;
-	unsigned int old_val;
-	unsigned int *data = (unsigned int *)table->data;
-	int update_task_count = 0;
-
-	/*
-	 * The policy mutex is acquired with cpu_hotplug.lock
-	 * held from cpu_up()->cpufreq_governor_interactive()->
-	 * sched_set_window(). So enforce the same order here.
-	 */
-	if (write && (data == &sysctl_sched_upmigrate_pct)) {
-		update_task_count = 1;
-		get_online_cpus();
-	}
-
-	mutex_lock(&policy_mutex);
-
-	old_val = *data;
-
-	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-
-	if (ret || !write)
-		goto done;
-
-	if (write && (old_val == *data))
-		goto done;
-
-	if (sysctl_sched_downmigrate_pct > sysctl_sched_upmigrate_pct ||
-				sysctl_sched_group_downmigrate_pct >
-				sysctl_sched_group_upmigrate_pct) {
-		*data = old_val;
-		ret = -EINVAL;
-		goto done;
-	}
-
-	/*
-	 * A big task tunable change requires re-classifying tasks on the
-	 * runqueue as big and setting their counters appropriately.
-	 * The sysctl interface affects secondary variables (*_pct), which are
-	 * then "atomically" carried over to the primary variables. The atomic
-	 * change includes taking the runqueue lock of all online cpus and
-	 * re-initializing their big counter values based on the changed criteria.
-	 */
-	if (update_task_count)
-		pre_big_task_count_change(cpu_online_mask);
-
-	set_hmp_defaults();
-
-	if (update_task_count)
-		post_big_task_count_change(cpu_online_mask);
-
-done:
-	mutex_unlock(&policy_mutex);
-	if (update_task_count)
-		put_online_cpus();
-	return ret;
-}
-
-inline int nr_big_tasks(struct rq *rq)
-{
-	return rq->hmp_stats.nr_big_tasks;
-}
-
-unsigned int cpu_temp(int cpu)
-{
-	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
-
-	if (per_cpu_info)
-		return per_cpu_info[cpu].temp;
-	else
-		return 0;
-}
-
-/* Return task demand in percentage scale */
-unsigned int pct_task_load(struct task_struct *p)
-{
-	unsigned int load;
-
-	load = div64_u64((u64)task_load(p) * 100, (u64)max_task_load());
-
-	return load;
-}
-
-static int __init set_sched_ravg_window(char *str)
-{
-	unsigned int window_size;
-
-	get_option(&str, &window_size);
-
-	if (window_size < MIN_SCHED_RAVG_WINDOW ||
-			window_size > MAX_SCHED_RAVG_WINDOW) {
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
-	sched_ravg_window = window_size;
-	return 0;
-}
-
-early_param("sched_ravg_window", set_sched_ravg_window);
-
-#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
-
-static inline u64 scale_exec_time(u64 delta, struct rq *rq)
-{
-	u32 freq;
-
-	freq = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
-	delta = DIV64_U64_ROUNDUP(delta * freq, max_possible_freq);
-	delta *= rq->cluster->exec_scale_factor;
-	delta >>= 10;
-
-	return delta;
-}
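
scale_exec_time() normalizes raw execution time to what it would have been on the fastest possible CPU at its highest frequency: the delta is scaled by the ratio of the cycle-derived current frequency to max_possible_freq, then by the cluster's efficiency factor (1024 meaning parity). A worked run of the same formula with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

/* Divide rounding up, as DIV64_U64_ROUNDUP does. */
static uint64_t div_round_up(uint64_t x, uint64_t y)
{
	return (x + y - 1) / y;
}

int main(void)
{
	/* All values below are hypothetical. */
	uint64_t delta = 1000000;		/* 1 ms of raw execution, ns */
	uint64_t cur_freq = 1100000;		/* kHz, from the cycle counters */
	uint64_t max_possible_freq = 2400000;	/* kHz, fastest CPU in the system */
	uint64_t exec_scale_factor = 640;	/* little-cluster efficiency; 1024 = parity */

	uint64_t scaled = div_round_up(delta * cur_freq, max_possible_freq);

	scaled = scaled * exec_scale_factor >> 10;
	printf("scaled exec time = %llu ns\n", (unsigned long long)scaled);
	return 0;
}

One raw millisecond on this slower, low-frequency CPU counts as roughly 0.29 ms of demand, which is what keeps demand comparable across clusters.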
-
-/* Does freq_required sufficiently exceed or fall behind cur_freq? */
-static inline int
-nearly_same_freq(unsigned int cur_freq, unsigned int freq_required)
-{
-	int delta = freq_required - cur_freq;
-
-	if (freq_required > cur_freq)
-		return delta < sysctl_sched_freq_inc_notify;
-
-	delta = -delta;
-
-	return delta < sysctl_sched_freq_dec_notify;
-}
-
-/* Convert busy time to frequency equivalent */
-static inline unsigned int load_to_freq(struct rq *rq, u64 load)
-{
-	unsigned int freq;
-
-	load = scale_load_to_cpu(load, cpu_of(rq));
-	load *= 128;
-	load = div64_u64(load, max_task_load());
-
-	freq = load * cpu_max_possible_freq(cpu_of(rq));
-	freq /= 128;
-
-	return freq;
-}
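
load_to_freq() expresses a window busy time as an equivalent frequency: the load becomes a fraction of max_task_load() (kept in 1/128 steps to stay in integer math) and that fraction is applied to the CPU's maximum possible frequency. A standalone recomputation with placeholder numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical values; the real ones come from the rq and cluster. */
	uint64_t load = 10000000;			/* busy ns in the window */
	uint64_t max_task_load = 20000000;		/* a fully busy window, ns */
	uint64_t cpu_max_possible_freq = 1800000;	/* kHz */

	uint64_t frac = load * 128 / max_task_load;	/* load in 1/128 units */
	uint64_t freq = frac * cpu_max_possible_freq / 128;

	printf("load %llu ns -> %llu kHz\n",
	       (unsigned long long)load, (unsigned long long)freq);
	return 0;
}

A half-busy window therefore maps to half of the CPU's maximum possible frequency, which is the value later compared against the notification thresholds.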
-
-/*
- * Return load from all related groups in given frequency domain.
- */
-static void group_load_in_freq_domain(struct cpumask *cpus,
-				u64 *grp_load, u64 *new_grp_load)
-{
-	int j;
-
-	for_each_cpu(j, cpus) {
-		struct rq *rq = cpu_rq(j);
-
-		*grp_load += rq->grp_time.prev_runnable_sum;
-		*new_grp_load += rq->grp_time.nt_prev_runnable_sum;
-	}
-}
-
-/*
- * Should scheduler alert governor for changing frequency?
- *
- * @check_pred - evaluate frequency based on the predictive demand
- * @check_groups - add load from all related groups on given cpu
- *
- * check_groups is set to 1 if a "related" task movement/wakeup is triggering
- * the notification check. To avoid "re-aggregation" of demand in such cases,
- * we check whether the migrated/woken task's demand (along with demand from
- * existing tasks on the cpu) can be met on the target cpu.
- *
- */
-
-static int send_notification(struct rq *rq, int check_pred, int check_groups)
-{
-	unsigned int cur_freq, freq_required;
-	unsigned long flags;
-	int rc = 0;
-	u64 group_load = 0, new_load  = 0;
-
-	if (check_pred) {
-		u64 prev = rq->old_busy_time;
-		u64 predicted = rq->hmp_stats.pred_demands_sum;
-
-		if (rq->cluster->cur_freq == cpu_max_freq(cpu_of(rq)))
-			return 0;
-
-		prev = max(prev, rq->old_estimated_time);
-		if (prev > predicted)
-			return 0;
-
-		cur_freq = load_to_freq(rq, prev);
-		freq_required = load_to_freq(rq, predicted);
-
-		if (freq_required < cur_freq + sysctl_sched_pred_alert_freq)
-			return 0;
-	} else {
-		/*
-		 * Protect from concurrent update of rq->prev_runnable_sum and
-		 * group cpu load
-		 */
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		if (check_groups)
-			group_load = rq->grp_time.prev_runnable_sum;
-
-		new_load = rq->prev_runnable_sum + group_load;
-		new_load = freq_policy_load(rq, new_load);
-
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-		cur_freq = load_to_freq(rq, rq->old_busy_time);
-		freq_required = load_to_freq(rq, new_load);
-
-		if (nearly_same_freq(cur_freq, freq_required))
-			return 0;
-	}
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	if (!rq->cluster->notifier_sent) {
-		rq->cluster->notifier_sent = 1;
-		rc = 1;
-		trace_sched_freq_alert(cpu_of(rq), check_pred, check_groups, rq,
-				       new_load);
-	}
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-	return rc;
-}
-
-/* Alert governor if there is a need to change frequency */
-void check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups)
-{
-	int cpu = cpu_of(rq);
-
-	if (!send_notification(rq, check_pred, check_groups))
-		return;
-
-	atomic_notifier_call_chain(
-		&load_alert_notifier_head, 0,
-		(void *)(long)cpu);
-}
-
-void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
-			     struct task_struct *p)
-{
-	bool check_groups;
-
-	rcu_read_lock();
-	check_groups = task_in_related_thread_group(p);
-	rcu_read_unlock();
-
-	if (!same_freq_domain(src_cpu, dest_cpu)) {
-		if (!src_cpu_dead)
-			check_for_freq_change(cpu_rq(src_cpu), false,
-					      check_groups);
-		check_for_freq_change(cpu_rq(dest_cpu), false, check_groups);
-	} else {
-		check_for_freq_change(cpu_rq(dest_cpu), true, check_groups);
-	}
-}
-
-#define INC_STEP 8
-#define DEC_STEP 2
-#define CONSISTENT_THRES 16
-#define INC_STEP_BIG 16
-/*
- * bucket_increase - update the count of all buckets
- *
- * @buckets: array of buckets tracking busy time of a task
- * @idx: the index of bucket to be incremented
- *
- * Each time a complete window finishes, count of bucket that runtime
- * falls in (@idx) is incremented. Counts of all other buckets are
- * decayed. The rate of increase and decay could be different based
- * on current count in the bucket.
- */
-static inline void bucket_increase(u8 *buckets, int idx)
-{
-	int i, step;
-
-	for (i = 0; i < NUM_BUSY_BUCKETS; i++) {
-		if (idx != i) {
-			if (buckets[i] > DEC_STEP)
-				buckets[i] -= DEC_STEP;
-			else
-				buckets[i] = 0;
-		} else {
-			step = buckets[i] >= CONSISTENT_THRES ?
-						INC_STEP_BIG : INC_STEP;
-			if (buckets[i] > U8_MAX - step)
-				buckets[i] = U8_MAX;
-			else
-				buckets[i] += step;
-		}
-	}
-}
-
-static inline int busy_to_bucket(u32 normalized_rt)
-{
-	int bidx;
-
-	bidx = mult_frac(normalized_rt, NUM_BUSY_BUCKETS, max_task_load());
-	bidx = min(bidx, NUM_BUSY_BUCKETS - 1);
-
-	/*
-	 * Combine the lowest two buckets. The lowest frequency falls into
-	 * the 2nd bucket, so always predicting the lowest bucket is not
-	 * useful.
-	 */
-	if (!bidx)
-		bidx++;
-
-	return bidx;
-}
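
Together these two helpers maintain a per-task histogram: each completed window bumps the bucket its runtime fell into (faster once that bucket is already "consistent") and decays every other bucket. A compact userspace demo of the update rule, reusing the same step constants on an invented sequence of windows:

#include <stdio.h>
#include <stdint.h>

#define NUM_BUSY_BUCKETS 10
#define INC_STEP 8
#define DEC_STEP 2
#define CONSISTENT_THRES 16
#define INC_STEP_BIG 16

static void bucket_increase(uint8_t *b, int idx)
{
	int i, step;

	for (i = 0; i < NUM_BUSY_BUCKETS; i++) {
		if (i != idx) {
			b[i] = b[i] > DEC_STEP ? b[i] - DEC_STEP : 0;
		} else {
			step = b[i] >= CONSISTENT_THRES ? INC_STEP_BIG : INC_STEP;
			b[i] = b[i] > 255 - step ? 255 : b[i] + step;
		}
	}
}

int main(void)
{
	uint8_t buckets[NUM_BUSY_BUCKETS] = { 0 };
	int i, w;

	/* Five windows landing in bucket 4, then two landing in bucket 7. */
	for (w = 0; w < 5; w++)
		bucket_increase(buckets, 4);
	for (w = 0; w < 2; w++)
		bucket_increase(buckets, 7);

	for (i = 0; i < NUM_BUSY_BUCKETS; i++)
		printf("bucket[%d] = %u\n", i, buckets[i]);
	return 0;
}

Bucket 4 climbs to 64 (switching to the bigger step once it passes 16) and then only decays slowly, so a brief change of behaviour does not immediately erase the history.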
-
-/*
- * get_pred_busy - calculate predicted demand for a task on runqueue
- *
- * @rq: runqueue of task p
- * @p: task whose prediction is being updated
- * @start: starting bucket. returned prediction should not be lower than
- *         this bucket.
- * @runtime: runtime of the task. returned prediction should not be lower
- *           than this runtime.
- * Note: @start can be derived from @runtime. It's passed in only to
- * avoid duplicated calculation in some cases.
- *
- * A new predicted busy time is returned for task @p based on @runtime
- * passed in. The function searches through buckets that represent busy
- * time equal to or bigger than @runtime and attempts to find the bucket
- * to use for prediction. Once found, it searches through historical busy
- * time and returns the latest that falls into the bucket. If no such busy
- * time exists, it returns the midpoint of that bucket.
- */
-static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
-				int start, u32 runtime)
-{
-	int i;
-	u8 *buckets = p->ravg.busy_buckets;
-	u32 *hist = p->ravg.sum_history;
-	u32 dmin, dmax;
-	u64 cur_freq_runtime = 0;
-	int first = NUM_BUSY_BUCKETS, final;
-	u32 ret = runtime;
-
-	/* skip prediction for new tasks due to lack of history */
-	if (unlikely(is_new_task(p)))
-		goto out;
-
-	/* find minimal bucket index to pick */
-	for (i = start; i < NUM_BUSY_BUCKETS; i++) {
-		if (buckets[i]) {
-			first = i;
-			break;
-		}
-	}
-	/* if no higher buckets are filled, predict runtime */
-	if (first >= NUM_BUSY_BUCKETS)
-		goto out;
-
-	/* compute the bucket for prediction */
-	final = first;
-
-	/* determine demand range for the predicted bucket */
-	if (final < 2) {
-		/* lowest two buckets are combined */
-		dmin = 0;
-		final = 1;
-	} else {
-		dmin = mult_frac(final, max_task_load(), NUM_BUSY_BUCKETS);
-	}
-	dmax = mult_frac(final + 1, max_task_load(), NUM_BUSY_BUCKETS);
-
-	/*
-	 * search through runtime history and return first runtime that falls
-	 * into the range of predicted bucket.
-	 */
-	for (i = 0; i < sched_ravg_hist_size; i++) {
-		if (hist[i] >= dmin && hist[i] < dmax) {
-			ret = hist[i];
-			break;
-		}
-	}
-	/* no historical runtime within the bucket found, use the bucket midpoint */
-	if (ret < dmin)
-		ret = (dmin + dmax) / 2;
-	/*
-	 * when updating in middle of a window, runtime could be higher
-	 * than all recorded history. Always predict at least runtime.
-	 */
-	ret = max(runtime, ret);
-out:
-	trace_sched_update_pred_demand(rq, p, runtime,
-		mult_frac((unsigned int)cur_freq_runtime, 100,
-			  sched_ravg_window), ret);
-	return ret;
-}
-
-static inline u32 calc_pred_demand(struct rq *rq, struct task_struct *p)
-{
-	if (p->ravg.pred_demand >= p->ravg.curr_window)
-		return p->ravg.pred_demand;
-
-	return get_pred_busy(rq, p, busy_to_bucket(p->ravg.curr_window),
-			     p->ravg.curr_window);
-}
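
Putting get_pred_busy() together: the current window's runtime selects a starting bucket, the first non-empty bucket at or above it defines a demand range, and the newest history sample inside that range (or the range midpoint if none matches) becomes the prediction, never lower than the runtime itself. A simplified worked example, assuming ten buckets and a 20 ms max load, with invented bucket counts and history; the combined-lowest-buckets special case is omitted:

#include <stdio.h>
#include <stdint.h>

#define NUM_BUSY_BUCKETS 10

int main(void)
{
	uint32_t max_load = 20000000;		/* a fully busy window, ns */
	uint32_t runtime = 5500000;		/* busy time so far in this window */
	uint32_t hist[] = { 4000000, 9000000, 8200000, 3000000, 7600000 };
	uint8_t buckets[NUM_BUSY_BUCKETS] = { 0, 0, 5, 0, 12, 0, 0, 0, 0, 0 };
	int i, start, final;

	/* Starting bucket derived from the current runtime. */
	start = (uint64_t)runtime * NUM_BUSY_BUCKETS / max_load;
	if (start == 0)
		start = 1;

	/* First filled bucket at or above start selects the demand range. */
	final = start;
	while (final < NUM_BUSY_BUCKETS && !buckets[final])
		final++;
	if (final >= NUM_BUSY_BUCKETS) {
		printf("no filled bucket, predict runtime %u ns\n", runtime);
		return 0;
	}

	uint32_t dmin = (uint64_t)final * max_load / NUM_BUSY_BUCKETS;
	uint32_t dmax = (uint64_t)(final + 1) * max_load / NUM_BUSY_BUCKETS;
	uint32_t pred = runtime;

	for (i = 0; i < 5; i++) {
		if (hist[i] >= dmin && hist[i] < dmax) {
			pred = hist[i];	/* newest history sample in range */
			break;
		}
	}
	if (pred < dmin)
		pred = (dmin + dmax) / 2;	/* no sample: use the midpoint */
	if (pred < runtime)
		pred = runtime;		/* never predict below what already ran */

	printf("predicted demand = %u ns (bucket %d)\n", pred, final);
	return 0;
}

Here the 4 ms history sample matches bucket 2's range, but because the task has already run 5.5 ms in this window the final prediction is raised to 5.5 ms.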
-
-static void reset_all_task_stats(void)
-{
-	struct task_struct *g, *p;
-
-	do_each_thread(g, p) {
-		reset_task_stats(p);
-	}  while_each_thread(g, p);
-}
-
-enum reset_reason_code {
-	WINDOW_CHANGE,
-	POLICY_CHANGE,
-	HIST_SIZE_CHANGE,
-	FREQ_AGGREGATE_CHANGE,
-};
-
-const char *sched_window_reset_reasons[] = {
-	"WINDOW_CHANGE",
-	"POLICY_CHANGE",
-	"HIST_SIZE_CHANGE",
-};
-
-/* Called with IRQs enabled */
-void reset_all_window_stats(u64 window_start, unsigned int window_size)
-{
-	int cpu, i;
-	unsigned long flags;
-	u64 start_ts = sched_ktime_clock();
-	int reason = WINDOW_CHANGE;
-	unsigned int old = 0, new = 0;
-
-	local_irq_save(flags);
-
-	read_lock(&tasklist_lock);
-
-	read_lock(&related_thread_group_lock);
-
-	/* Taking all runqueue locks prevents race with sched_exit(). */
-	for_each_possible_cpu(cpu)
-		raw_spin_lock(&cpu_rq(cpu)->lock);
-
-	sched_disable_window_stats = 1;
-
-	reset_all_task_stats();
-
-	read_unlock(&tasklist_lock);
-
-	if (window_size) {
-		sched_ravg_window = window_size * TICK_NSEC;
-		set_hmp_defaults();
-		sched_load_granule = sched_ravg_window / NUM_LOAD_INDICES;
-	}
-
-	sched_disable_window_stats = 0;
-
-	for_each_possible_cpu(cpu) {
-		struct rq *rq = cpu_rq(cpu);
-
-		if (window_start)
-			rq->window_start = window_start;
-		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
-		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
-		memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
-		for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
-			memset(&rq->load_subs[i], 0,
-					sizeof(struct load_subtractions));
-			clear_top_tasks_table(rq->top_tasks[i]);
-			clear_top_tasks_bitmap(rq->top_tasks_bitmap[i]);
-		}
-
-		rq->curr_table = 0;
-		rq->curr_top = 0;
-		rq->prev_top = 0;
-		reset_cpu_hmp_stats(cpu, 1);
-	}
-
-	if (sched_window_stats_policy != sysctl_sched_window_stats_policy) {
-		reason = POLICY_CHANGE;
-		old = sched_window_stats_policy;
-		new = sysctl_sched_window_stats_policy;
-		sched_window_stats_policy = sysctl_sched_window_stats_policy;
-	} else if (sched_ravg_hist_size != sysctl_sched_ravg_hist_size) {
-		reason = HIST_SIZE_CHANGE;
-		old = sched_ravg_hist_size;
-		new = sysctl_sched_ravg_hist_size;
-		sched_ravg_hist_size = sysctl_sched_ravg_hist_size;
-	} else if (sched_freq_aggregate !=
-					sysctl_sched_freq_aggregate) {
-		reason = FREQ_AGGREGATE_CHANGE;
-		old = sched_freq_aggregate;
-		new = sysctl_sched_freq_aggregate;
-		sched_freq_aggregate = sysctl_sched_freq_aggregate;
-	}
-
-	for_each_possible_cpu(cpu)
-		raw_spin_unlock(&cpu_rq(cpu)->lock);
-
-	read_unlock(&related_thread_group_lock);
-
-	local_irq_restore(flags);
-
-	trace_sched_reset_all_window_stats(window_start, window_size,
-		sched_ktime_clock() - start_ts, reason, old, new);
-}
-
-void sched_get_cpus_busy(struct sched_load *busy,
-			 const struct cpumask *query_cpus)
-{
-	unsigned long flags;
-	struct rq *rq;
-	const int cpus = cpumask_weight(query_cpus);
-	u64 load[cpus], group_load[cpus];
-	u64 nload[cpus], ngload[cpus];
-	u64 pload[cpus];
-	unsigned int max_freq[cpus];
-	int notifier_sent = 0;
-	int early_detection[cpus];
-	int cpu, i = 0;
-	unsigned int window_size;
-	u64 max_prev_sum = 0;
-	int max_busy_cpu = cpumask_first(query_cpus);
-	u64 total_group_load = 0, total_ngload = 0;
-	bool aggregate_load = false;
-	struct sched_cluster *cluster = cpu_cluster(cpumask_first(query_cpus));
-
-	if (unlikely(cpus == 0))
-		return;
-
-	local_irq_save(flags);
-
-	/*
-	 * This function could be called in timer context, and the
-	 * current task may have been executing for a long time. Ensure
-	 * that the window stats are current by doing an update.
-	 */
-
-	for_each_cpu(cpu, query_cpus)
-		raw_spin_lock(&cpu_rq(cpu)->lock);
-
-	window_size = sched_ravg_window;
-
-	/*
-	 * We don't really need the cluster lock for this entire for loop
-	 * We don't really need the cluster lock for this entire for loop
-	 * block. However, there is no advantage in optimizing this, as rq
-	 * locks are held regardless and would prevent migration anyway.
-	raw_spin_lock(&cluster->load_lock);
-
-	for_each_cpu(cpu, query_cpus) {
-		rq = cpu_rq(cpu);
-
-		update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_ktime_clock(),
-				 0);
-
-		/*
-		 * Ensure that we don't report load for 'cpu' again via the
-		 * cpufreq_update_util path in the window that started at
-		 * rq->window_start
-		 */
-		rq->load_reported_window = rq->window_start;
-
-		account_load_subtractions(rq);
-		load[i] = rq->prev_runnable_sum;
-		nload[i] = rq->nt_prev_runnable_sum;
-		pload[i] = rq->hmp_stats.pred_demands_sum;
-		rq->old_estimated_time = pload[i];
-
-		if (load[i] > max_prev_sum) {
-			max_prev_sum = load[i];
-			max_busy_cpu = cpu;
-		}
-
-		/*
-		 * sched_get_cpus_busy() is called for all CPUs in a
-		 * frequency domain. So the notifier_sent flag per
-		 * cluster works even when a frequency domain spans
-		 * more than 1 cluster.
-		 */
-		if (rq->cluster->notifier_sent) {
-			notifier_sent = 1;
-			rq->cluster->notifier_sent = 0;
-		}
-		early_detection[i] = (rq->ed_task != NULL);
-		max_freq[i] = cpu_max_freq(cpu);
-		i++;
-	}
-
-	raw_spin_unlock(&cluster->load_lock);
-
-	group_load_in_freq_domain(
-			&cpu_rq(max_busy_cpu)->freq_domain_cpumask,
-			&total_group_load, &total_ngload);
-	aggregate_load = !!(total_group_load > sched_freq_aggregate_threshold);
-
-	i = 0;
-	for_each_cpu(cpu, query_cpus) {
-		group_load[i] = 0;
-		ngload[i] = 0;
-
-		if (early_detection[i])
-			goto skip_early;
-
-		rq = cpu_rq(cpu);
-		if (aggregate_load) {
-			if (cpu == max_busy_cpu) {
-				group_load[i] = total_group_load;
-				ngload[i] = total_ngload;
-			}
-		} else {
-			group_load[i] = rq->grp_time.prev_runnable_sum;
-			ngload[i] = rq->grp_time.nt_prev_runnable_sum;
-		}
-
-		load[i] += group_load[i];
-		nload[i] += ngload[i];
-
-		load[i] = freq_policy_load(rq, load[i]);
-		rq->old_busy_time = load[i];
-
-		/*
-		 * Scale load in reference to cluster max_possible_freq.
-		 *
-		 * Note that scale_load_to_cpu() scales load in reference to
-		 * the cluster max_freq.
-		 */
-		load[i] = scale_load_to_cpu(load[i], cpu);
-		nload[i] = scale_load_to_cpu(nload[i], cpu);
-		pload[i] = scale_load_to_cpu(pload[i], cpu);
-skip_early:
-		i++;
-	}
-
-	for_each_cpu(cpu, query_cpus)
-		raw_spin_unlock(&(cpu_rq(cpu))->lock);
-
-	local_irq_restore(flags);
-
-	i = 0;
-	for_each_cpu(cpu, query_cpus) {
-		rq = cpu_rq(cpu);
-
-		if (early_detection[i]) {
-			busy[i].prev_load = div64_u64(sched_ravg_window,
-							NSEC_PER_USEC);
-			busy[i].new_task_load = 0;
-			busy[i].predicted_load = 0;
-			goto exit_early;
-		}
-
-		load[i] = scale_load_to_freq(load[i], max_freq[i],
-				cpu_max_possible_freq(cpu));
-		nload[i] = scale_load_to_freq(nload[i], max_freq[i],
-				cpu_max_possible_freq(cpu));
-
-		pload[i] = scale_load_to_freq(pload[i], max_freq[i],
-					     rq->cluster->max_possible_freq);
-
-		busy[i].prev_load = div64_u64(load[i], NSEC_PER_USEC);
-		busy[i].new_task_load = div64_u64(nload[i], NSEC_PER_USEC);
-		busy[i].predicted_load = div64_u64(pload[i], NSEC_PER_USEC);
-
-exit_early:
-		trace_sched_get_busy(cpu, busy[i].prev_load,
-				     busy[i].new_task_load,
-				     busy[i].predicted_load,
-				     early_detection[i]);
-		i++;
-	}
-}
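
The interesting policy in sched_get_cpus_busy() is how related-thread-group load is reported: if the frequency domain's total group load exceeds sched_freq_aggregate_threshold, all of it is credited to the single busiest CPU; otherwise each CPU just adds its own per-rq group contribution. A toy model of that decision with invented loads:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical per-CPU sums for one 4-CPU frequency domain, in ns. */
	uint64_t prev_sum[4]     = { 9000000, 3000000, 2000000, 1000000 };
	uint64_t grp_prev_sum[4] = { 2000000, 2500000, 1500000, 1000000 };
	uint64_t threshold       = 6000000;	/* stand-in aggregate threshold */
	uint64_t total_group = 0;
	int i, max_busy_cpu = 0, aggregate;

	for (i = 0; i < 4; i++) {
		total_group += grp_prev_sum[i];
		if (prev_sum[i] > prev_sum[max_busy_cpu])
			max_busy_cpu = i;
	}
	aggregate = total_group > threshold;

	for (i = 0; i < 4; i++) {
		uint64_t load = prev_sum[i];

		if (aggregate) {
			if (i == max_busy_cpu)
				load += total_group;	/* all group load on the busiest CPU */
		} else {
			load += grp_prev_sum[i];	/* each CPU keeps its own share */
		}
		printf("cpu%d reported load = %llu ns\n", i, (unsigned long long)load);
	}
	return 0;
}

With 7 ms of total group load against a 6 ms threshold, cpu0 reports 16 ms and the others only their own load, which pushes the governor to raise the busiest CPU's frequency instead of spreading the request.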
-
-int sched_set_window(u64 window_start, unsigned int window_size)
-{
-	u64 now, cur_jiffies, jiffy_ktime_ns;
-	s64 ws;
-	unsigned long flags;
-
-	if (window_size * TICK_NSEC <  MIN_SCHED_RAVG_WINDOW)
-		return -EINVAL;
-
-	mutex_lock(&policy_mutex);
-
-	/*
-	 * Get a consistent view of ktime, jiffies, and the time
-	 * since the last jiffy (based on last_jiffies_update).
-	 */
-	local_irq_save(flags);
-	cur_jiffies = jiffy_to_ktime_ns(&now, &jiffy_ktime_ns);
-	local_irq_restore(flags);
-
-	/* translate window_start from jiffies to nanoseconds */
-	ws = (window_start - cur_jiffies); /* jiffy difference */
-	ws *= TICK_NSEC;
-	ws += jiffy_ktime_ns;
-
-	/*
-	 * Roll back calculated window start so that it is in
-	 * the past (window stats must have a current window).
-	 */
-	while (ws > now)
-		ws -= (window_size * TICK_NSEC);
-
-	BUG_ON(sched_ktime_clock() < ws);
-
-	reset_all_window_stats(ws, window_size);
-
-	sched_update_freq_max_load(cpu_possible_mask);
-
-	mutex_unlock(&policy_mutex);
-
-	return 0;
-}
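
sched_set_window() receives the window start in jiffies from the governor, converts it to absolute ktime nanoseconds using the jiffy/ktime snapshot, and then rolls it back by whole windows until it lies in the past. A small sketch of that translation with stand-in values for the snapshot (TICK_NSEC here assumes a 250 Hz tick):

#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 4000000LL	/* 250 Hz tick, chosen only for illustration */

int main(void)
{
	/* Hypothetical snapshot of ktime and jiffies taken at the same instant. */
	int64_t now = 1000000000000LL;		/* current ktime, ns */
	int64_t jiffy_ktime_ns = 999998500000LL;	/* ktime of the last jiffy update */
	uint64_t cur_jiffies = 250000;
	uint64_t window_start = 250010;		/* requested start, in jiffies */
	unsigned int window_size = 5;		/* window length, in jiffies */

	/* Translate the jiffy difference into an absolute ktime value. */
	int64_t ws = (int64_t)(window_start - cur_jiffies) * TICK_NSEC +
		     jiffy_ktime_ns;

	/* Roll back whole windows until the start lies in the past. */
	while (ws > now)
		ws -= (int64_t)window_size * TICK_NSEC;

	printf("window start = %lld ns, now = %lld ns\n",
	       (long long)ws, (long long)now);
	return 0;
}

The requested start was 10 jiffies in the future, so two whole windows are subtracted and the effective start ends up one and a half milliseconds in the past.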
-
-static inline void create_subtraction_entry(struct rq *rq, u64 ws, int index)
-{
-	rq->load_subs[index].window_start = ws;
-	rq->load_subs[index].subs = 0;
-	rq->load_subs[index].new_subs = 0;
-}
-
-#define sched_up_down_migrate_auto_update 1
-static void check_for_up_down_migrate_update(const struct cpumask *cpus)
-{
-	int i = cpumask_first(cpus);
-
-	if (!sched_up_down_migrate_auto_update)
-		return;
-
-	if (cpu_max_possible_capacity(i) == max_possible_capacity)
-		return;
-
-	if (cpu_max_possible_freq(i) == cpu_max_freq(i))
-		up_down_migrate_scale_factor = 1024;
-	else
-		up_down_migrate_scale_factor = (1024 *
-				 cpu_max_possible_freq(i)) / cpu_max_freq(i);
-
-	update_up_down_migrate();
-}
-
-void update_cpu_cluster_capacity(const cpumask_t *cpus)
-{
-	int i;
-	struct sched_cluster *cluster;
-	struct cpumask cpumask;
-
-	cpumask_copy(&cpumask, cpus);
-	pre_big_task_count_change(cpu_possible_mask);
-
-	for_each_cpu(i, &cpumask) {
-		cluster = cpu_rq(i)->cluster;
-		cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
-
-		cluster->capacity = compute_capacity(cluster);
-		cluster->load_scale_factor = compute_load_scale_factor(cluster);
-
-		/* 'cpus' can span more than one cluster */
-		check_for_up_down_migrate_update(&cluster->cpus);
-	}
-
-	__update_min_max_capacity();
-
-	post_big_task_count_change(cpu_possible_mask);
-}
-
-static DEFINE_SPINLOCK(cpu_freq_min_max_lock);
-void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax)
-{
-	struct cpumask cpumask;
-	struct sched_cluster *cluster;
-	int i, update_capacity = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cpu_freq_min_max_lock, flags);
-	cpumask_copy(&cpumask, cpus);
-	for_each_cpu(i, &cpumask) {
-		cluster = cpu_rq(i)->cluster;
-		cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
-
-		update_capacity += (cluster->max_mitigated_freq != fmax);
-		cluster->max_mitigated_freq = fmax;
-	}
-	spin_unlock_irqrestore(&cpu_freq_min_max_lock, flags);
-
-	if (update_capacity)
-		update_cpu_cluster_capacity(cpus);
-}
-
-static int cpufreq_notifier_trans(struct notifier_block *nb,
-		unsigned long val, void *data)
-{
-	struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
-	unsigned int cpu = freq->cpu, new_freq = freq->new;
-	unsigned long flags;
-	struct sched_cluster *cluster;
-	struct cpumask policy_cpus = cpu_rq(cpu)->freq_domain_cpumask;
-	int i, j;
-
-	if (val != CPUFREQ_POSTCHANGE)
-		return 0;
-
-	BUG_ON(!new_freq);
-
-	if (cpu_cur_freq(cpu) == new_freq)
-		return 0;
-
-	for_each_cpu(i, &policy_cpus) {
-		cluster = cpu_rq(i)->cluster;
-
-		for_each_cpu(j, &cluster->cpus) {
-			struct rq *rq = cpu_rq(j);
-
-			raw_spin_lock_irqsave(&rq->lock, flags);
-			update_task_ravg(rq->curr, rq, TASK_UPDATE,
-						sched_ktime_clock(), 0);
-			raw_spin_unlock_irqrestore(&rq->lock, flags);
-		}
-
-		cluster->cur_freq = new_freq;
-		cpumask_andnot(&policy_cpus, &policy_cpus, &cluster->cpus);
-	}
-
-	return 0;
-}
-
-static int pwr_stats_ready_notifier(struct notifier_block *nb,
-				    unsigned long cpu, void *data)
-{
-	cpumask_t mask = CPU_MASK_NONE;
-
-	cpumask_set_cpu(cpu, &mask);
-	sched_update_freq_max_load(&mask);
-
-	mutex_lock(&cluster_lock);
-	sort_clusters();
-	mutex_unlock(&cluster_lock);
-
-	return 0;
-}
-
-static struct notifier_block notifier_trans_block = {
-	.notifier_call = cpufreq_notifier_trans
-};
-
-static struct notifier_block notifier_pwr_stats_ready = {
-	.notifier_call = pwr_stats_ready_notifier
-};
-
-int __weak register_cpu_pwr_stats_ready_notifier(struct notifier_block *nb)
-{
-	return -EINVAL;
-}
-
-static int register_sched_callback(void)
-{
-	cpufreq_register_notifier(&notifier_trans_block,
-				  CPUFREQ_TRANSITION_NOTIFIER);
-
-	register_cpu_pwr_stats_ready_notifier(&notifier_pwr_stats_ready);
-
-	return 0;
-}
-
-/*
- * cpufreq callbacks can be registered at core_initcall or later time.
- * Any registration done prior to that is "forgotten" by cpufreq. See
- * initialization of variable init_cpufreq_transition_notifier_list_called
- * for further information.
- */
-core_initcall(register_sched_callback);
-
-void update_avg_burst(struct task_struct *p)
-{
-	update_avg(&p->ravg.avg_burst, p->ravg.curr_burst);
-	p->ravg.curr_burst = 0;
-}
-
-void note_task_waking(struct task_struct *p, u64 wallclock)
-{
-	u64 sleep_time = wallclock - p->last_switch_out_ts;
-
-	p->last_wake_ts = wallclock;
-	update_avg(&p->ravg.avg_sleep_time, sleep_time);
-}
-
-#ifdef CONFIG_CGROUP_SCHED
-u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
-					  struct cftype *cft)
-{
-	struct task_group *tg = css_tg(css);
-
-	return tg->upmigrate_discouraged;
-}
-
-int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
-				struct cftype *cft, u64 upmigrate_discourage)
-{
-	struct task_group *tg = css_tg(css);
-	int discourage = upmigrate_discourage > 0;
-
-	if (tg->upmigrate_discouraged == discourage)
-		return 0;
-
-	/*
-	 * Revisit big-task classification for tasks of this cgroup. It would
-	 * have been efficient to walk tasks of just this cgroup in running
-	 * state, but we don't have easy means to do that. Walk all tasks in
-	 * running state on all cpus instead and re-visit their big task
-	 * classification.
-	 */
-	get_online_cpus();
-	pre_big_task_count_change(cpu_online_mask);
-
-	tg->upmigrate_discouraged = discourage;
-
-	post_big_task_count_change(cpu_online_mask);
-	put_online_cpus();
-
-	return 0;
-}
-#endif /* CONFIG_CGROUP_SCHED */
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 65b34b4..2b556d0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -39,21 +39,6 @@
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-#ifdef CONFIG_SCHED_HMP
-static int
-select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
-{
-	int target;
-
-	rcu_read_lock();
-	target = find_lowest_rq(p);
-	if (target != -1)
-		cpu = target;
-	rcu_read_unlock();
-
-	return cpu;
-}
-#endif /* CONFIG_SCHED_HMP */
 #endif /* CONFIG_SMP */
 #else  /* CONFIG_SCHED_WALT */
 
@@ -63,7 +48,7 @@
 static inline void
 dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
 
-#endif	/* CONFIG_SCHED_HMP */
+#endif	/* CONFIG_SCHED_WALT */
 
 #include "walt.h"
 
@@ -1515,10 +1500,6 @@
 	struct rq *rq;
 	bool may_not_preempt;
 
-#ifdef CONFIG_SCHED_HMP
-	return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
-#endif
-
 	/* For anything but wake ups, just return the task_cpu */
 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
 		goto out;
@@ -1771,93 +1752,6 @@
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-#ifdef CONFIG_SCHED_HMP
-static int find_lowest_rq_hmp(struct task_struct *task)
-{
-	struct cpumask *lowest_mask = *this_cpu_ptr(&local_cpu_mask);
-	struct cpumask candidate_mask = CPU_MASK_NONE;
-	struct sched_cluster *cluster;
-	int best_cpu = -1;
-	int prev_cpu = task_cpu(task);
-	u64 cpu_load, min_load = ULLONG_MAX;
-	int i;
-	int restrict_cluster;
-	int boost_on_big;
-	int pack_task, wakeup_latency, least_wakeup_latency = INT_MAX;
-
-	boost_on_big = sched_boost() == FULL_THROTTLE_BOOST &&
-			sched_boost_policy() == SCHED_BOOST_ON_BIG;
-
-	restrict_cluster = sysctl_sched_restrict_cluster_spill;
-
-	/* Make sure the mask is initialized first */
-	if (unlikely(!lowest_mask))
-		return best_cpu;
-
-	if (task->nr_cpus_allowed == 1)
-		return best_cpu; /* No other targets possible */
-
-	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
-		return best_cpu; /* No targets found */
-
-	pack_task = is_short_burst_task(task);
-
-	/*
-	 * At this point we have built a mask of cpus representing the
-	 * lowest priority tasks in the system.  Now we want to elect
-	 * the best one based on our affinity and topology.
-	 */
-
-	for_each_sched_cluster(cluster) {
-		if (boost_on_big && cluster->capacity != max_possible_capacity)
-			continue;
-
-		cpumask_and(&candidate_mask, &cluster->cpus, lowest_mask);
-		cpumask_andnot(&candidate_mask, &candidate_mask,
-			       cpu_isolated_mask);
-
-		if (cpumask_empty(&candidate_mask))
-			continue;
-
-		for_each_cpu(i, &candidate_mask) {
-			if (sched_cpu_high_irqload(i))
-				continue;
-
-			cpu_load = cpu_rq(i)->hmp_stats.cumulative_runnable_avg;
-			if (!restrict_cluster)
-				cpu_load = scale_load_to_cpu(cpu_load, i);
-
-			if (pack_task) {
-				wakeup_latency = cpu_rq(i)->wakeup_latency;
-
-				if (wakeup_latency > least_wakeup_latency)
-					continue;
-
-				if (wakeup_latency < least_wakeup_latency) {
-					least_wakeup_latency = wakeup_latency;
-					min_load = cpu_load;
-					best_cpu = i;
-					continue;
-				}
-			}
-
-			if (cpu_load < min_load ||
-				(cpu_load == min_load &&
-				(i == prev_cpu || (best_cpu != prev_cpu &&
-				cpus_share_cache(prev_cpu, i))))) {
-				min_load = cpu_load;
-				best_cpu = i;
-			}
-		}
-
-		if (restrict_cluster && best_cpu != -1)
-			break;
-	}
-
-	return best_cpu;
-}
-#endif	/* CONFIG_SCHED_HMP */
-
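
For reference, the removed RT path walked clusters in capacity order and, within each candidate cluster, picked the CPU with the smallest cumulative runnable load, breaking ties in favour of the task's previous CPU or a CPU sharing its cache. A cut-down model of just that tie-break, with invented loads and topology:

#include <stdbool.h>
#include <stdio.h>

#define NCPU 4

int main(void)
{
	/* Hypothetical per-CPU cumulative runnable load. */
	unsigned long long load[NCPU] = { 300, 250, 250, 400 };
	int prev_cpu = 2;
	/* Assume CPUs 1-3 share a cache with prev_cpu; CPU 0 does not. */
	bool shares_cache_with_prev[NCPU] = { false, true, true, true };
	unsigned long long min_load = ~0ULL;
	int i, best_cpu = -1;

	for (i = 0; i < NCPU; i++) {
		bool better = load[i] < min_load;
		bool tie = load[i] == min_load &&
			   (i == prev_cpu ||
			    (best_cpu != prev_cpu && shares_cache_with_prev[i]));

		if (better || tie) {
			min_load = load[i];
			best_cpu = i;
		}
	}
	printf("best_cpu = %d (min load %llu)\n", best_cpu, min_load);
	return 0;
}

CPUs 1 and 2 tie on load, but CPU 2 is the task's previous CPU, so it wins; this is the same preference the deleted find_lowest_rq_hmp() expressed inline.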
 static inline unsigned long task_util(struct task_struct *p)
 {
 #ifdef CONFIG_SCHED_WALT
@@ -1888,10 +1782,6 @@
 	long max_spare_cap = -LONG_MAX;
 	bool placement_boost;
 
-#ifdef CONFIG_SCHED_HMP
-	return find_lowest_rq_hmp(task);
-#endif
-
 	/* Make sure the mask is initialized first */
 	if (unlikely(!lowest_mask))
 		return -1;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 204a0e1..16ed5d7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -318,10 +318,6 @@
 struct task_group {
 	struct cgroup_subsys_state css;
 
-#ifdef CONFIG_SCHED_HMP
-	bool upmigrate_discouraged;
-#endif
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
@@ -1312,7 +1308,6 @@
 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
 #define WF_FORK		0x02		/* child wakeup after fork */
 #define WF_MIGRATED	0x4		/* internal use, task got migrated */
-#define WF_NO_NOTIFIER	0x08		/* do not notify governor */
 
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
@@ -2212,12 +2207,14 @@
 #ifdef CONFIG_SCHED_WALT
 u64 sched_ktime_clock(void);
 void note_task_waking(struct task_struct *p, u64 wallclock);
+extern void update_avg_burst(struct task_struct *p);
 #else /* CONFIG_SCHED_WALT */
 static inline u64 sched_ktime_clock(void)
 {
 	return 0;
 }
 static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
+static inline void update_avg_burst(struct task_struct *p) { }
 #endif /* CONFIG_SCHED_WALT */
 
 #ifdef CONFIG_CPU_FREQ
@@ -2711,14 +2708,10 @@
 extern void clear_ed_task(struct task_struct *p, struct rq *rq);
 extern bool early_detection_notify(struct rq *rq, u64 wallclock);
 
-#ifdef CONFIG_SCHED_HMP
-extern unsigned int power_cost(int cpu, u64 demand);
-#else
 static inline unsigned int power_cost(int cpu, u64 demand)
 {
 	return cpu_max_possible_capacity(cpu);
 }
-#endif
 
 #else	/* CONFIG_SCHED_WALT */
 
@@ -2881,86 +2874,7 @@
 
 #endif	/* CONFIG_SCHED_WALT */
 
-#ifdef CONFIG_SCHED_HMP
-#define energy_aware() false
-
-extern int is_big_task(struct task_struct *p);
-extern unsigned int pct_task_load(struct task_struct *p);
-extern void notify_migration(int src_cpu, int dest_cpu,
-			bool src_cpu_dead, struct task_struct *p);
-extern void note_task_waking(struct task_struct *p, u64 wallclock);
-extern void
-check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
-extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
-					struct task_struct *p, s64 delta);
-extern unsigned int cpu_temp(int cpu);
-extern void pre_big_task_count_change(const struct cpumask *cpus);
-extern void post_big_task_count_change(const struct cpumask *cpus);
-extern void set_hmp_defaults(void);
-extern void update_avg_burst(struct task_struct *p);
-extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
-
-extern unsigned int nr_eligible_big_tasks(int cpu);
-
-static inline void
-inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
-{
-	if (sched_disable_window_stats)
-		return;
-
-	if (is_big_task(p))
-		stats->nr_big_tasks++;
-}
-
-static inline void
-dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
-{
-	if (sched_disable_window_stats)
-		return;
-
-	if (is_big_task(p))
-		stats->nr_big_tasks--;
-
-	BUG_ON(stats->nr_big_tasks < 0);
-}
-
-static inline bool is_short_burst_task(struct task_struct *p)
-{
-	return p->ravg.avg_burst < sysctl_sched_short_burst &&
-	       p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
-}
-
-#else
 static inline bool energy_aware(void)
 {
 	return sched_feat(ENERGY_AWARE);
 }
-
-static inline int pct_task_load(struct task_struct *p) { return 0; }
-
-static inline void notify_migration(int src_cpu, int dest_cpu,
-			bool src_cpu_dead, struct task_struct *p) { }
-
-static inline void
-check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
-
-static inline void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
-				      struct task_struct *p, s64 delta) { }
-
-static inline unsigned int cpu_temp(int cpu)
-{
-	return 0;
-}
-
-static inline void pre_big_task_count_change(const struct cpumask *cpus) { }
-
-static inline void post_big_task_count_change(const struct cpumask *cpus) { }
-
-static inline void set_hmp_defaults(void) { }
-
-static inline void update_avg_burst(struct task_struct *p) { }
-
-static inline void set_task_last_switch_out(struct task_struct *p,
-					    u64 wallclock) { }
-
-#endif /* CONFIG_SCHED_HMP */
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index bf8afc7..bef6d73 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -612,40 +612,6 @@
 	raw_spin_unlock(&cluster->load_lock);
 }
 
-#ifdef CONFIG_SCHED_HMP
-static inline void
-init_new_task_load_hmp(struct task_struct *p, bool idle_task)
-{
-	p->ravg.curr_burst = 0;
-	/*
-	 * Initialize the avg_burst to twice the threshold, so that
-	 * a task would not be classified as short burst right away
-	 * after fork. It takes at least 6 sleep-wakeup cycles for
-	 * the avg_burst to go below the threshold.
-	 */
-	p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
-	p->ravg.avg_sleep_time = 0;
-}
-
-static inline void
-update_task_burst(struct task_struct *p, struct rq *rq, int event, u64 runtime)
-{
-	/*
-	 * update_task_demand() has checks for idle task and
-	 * exit task. The runtime may include the wait time,
-	 * so update the burst only for the cases where the
-	 * task is running.
-	 */
-	if (event == PUT_PREV_TASK || (event == TASK_UPDATE &&
-				rq->curr == p))
-		p->ravg.curr_burst += runtime;
-}
-
-static void reset_task_stats_hmp(struct task_struct *p)
-{
-	p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst;
-}
-#else
 static inline void
 init_new_task_load_hmp(struct task_struct *p, bool idle_task)
 {
@@ -659,7 +625,6 @@
 static void reset_task_stats_hmp(struct task_struct *p)
 {
 }
-#endif
 
 static inline void inter_cluster_migration_fixup
 	(struct task_struct *p, int new_cpu, int task_cpu, bool new_task)
@@ -3057,7 +3022,6 @@
 	core_ctl_check(this_rq()->window_start);
 }
 
-#ifndef CONFIG_SCHED_HMP
 int walt_proc_update_handler(struct ctl_table *table, int write,
 			     void __user *buffer, size_t *lenp,
 			     loff_t *ppos)
@@ -3085,4 +3049,3 @@
 
 	return ret;
 }
-#endif
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 887933f..da27ec6 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -65,7 +65,6 @@
 
 extern unsigned int nr_eligible_big_tasks(int cpu);
 
-#ifndef CONFIG_SCHED_HMP
 static inline void
 inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
 {
@@ -87,7 +86,6 @@
 
 	BUG_ON(stats->nr_big_tasks < 0);
 }
-#endif
 
 static inline void
 adjust_nr_big_tasks(struct hmp_sched_stats *stats, int delta, bool inc)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 11ec278..1d894fc 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -128,9 +128,6 @@
 static unsigned long one_ul = 1;
 static int one_hundred = 100;
 static int one_thousand = 1000;
-#ifdef CONFIG_SCHED_HMP
-static int max_freq_reporting_policy = FREQ_REPORT_INVALID_POLICY - 1;
-#endif
 #ifdef CONFIG_PRINTK
 static int ten_thousand = 10000;
 #endif
@@ -305,11 +302,7 @@
 		.data		= &sysctl_sched_group_upmigrate_pct,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-#ifdef CONFIG_SCHED_HMP
-		.proc_handler	= sched_hmp_proc_update_handler,
-#else
 		.proc_handler	= walt_proc_update_handler,
-#endif
 		.extra1		= &sysctl_sched_group_downmigrate_pct,
 	},
 	{
@@ -317,194 +310,11 @@
 		.data		= &sysctl_sched_group_downmigrate_pct,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-#ifdef CONFIG_SCHED_HMP
-		.proc_handler	= sched_hmp_proc_update_handler,
-#else
 		.proc_handler	= walt_proc_update_handler,
-#endif
 		.extra1		= &zero,
 		.extra2		= &sysctl_sched_group_upmigrate_pct,
 	},
 #endif
-#ifdef CONFIG_SCHED_HMP
-	{
-		.procname	= "sched_freq_reporting_policy",
-		.data		= &sysctl_sched_freq_reporting_policy,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
-		.extra2		= &max_freq_reporting_policy,
-	},
-	{
-		.procname	= "sched_freq_inc_notify",
-		.data		= &sysctl_sched_freq_inc_notify,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
-	},
-	{
-		.procname	= "sched_freq_dec_notify",
-		.data		= &sysctl_sched_freq_dec_notify,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
-	},
-	{
-		.procname       = "sched_ravg_hist_size",
-		.data           = &sysctl_sched_ravg_hist_size,
-		.maxlen         = sizeof(unsigned int),
-		.mode           = 0644,
-		.proc_handler   = sched_window_update_handler,
-	},
-	{
-		.procname       = "sched_window_stats_policy",
-		.data           = &sysctl_sched_window_stats_policy,
-		.maxlen         = sizeof(unsigned int),
-		.mode           = 0644,
-		.proc_handler   = sched_window_update_handler,
-	},
-	{
-		.procname	= "sched_spill_load",
-		.data		= &sysctl_sched_spill_load_pct,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_hmp_proc_update_handler,
-		.extra1		= &zero,
-		.extra2		= &one_hundred,
-	},
-	{
-		.procname	= "sched_spill_nr_run",
-		.data		= &sysctl_sched_spill_nr_run,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
-	},
-	{
-		.procname	= "sched_upmigrate",
-		.data		= &sysctl_sched_upmigrate_pct,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_hmp_proc_update_handler,
-		.extra1		= &zero,
-		.extra2		= &one_hundred,
-	},
-	{
-		.procname	= "sched_downmigrate",
-		.data		= &sysctl_sched_downmigrate_pct,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_hmp_proc_update_handler,
-		.extra1		= &zero,
-		.extra2		= &one_hundred,
-	},
-	{
-		.procname	= "sched_init_task_load",
-		.data		= &sysctl_sched_init_task_load_pct,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_hmp_proc_update_handler,
-		.extra1		= &zero,
-		.extra2		= &one_hundred,
-	},
-	{
-		.procname	= "sched_select_prev_cpu_us",
-		.data		= &sysctl_sched_select_prev_cpu_us,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler   = sched_hmp_proc_update_handler,
-		.extra1		= &zero,
-	},
-	{
-		.procname	= "sched_restrict_cluster_spill",
-		.data		= &sysctl_sched_restrict_cluster_spill,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
-		.extra2		= &one,
-	},
-	{
-		.procname	= "sched_small_wakee_task_load",
-		.data		= &sysctl_sched_small_wakee_task_load_pct,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler   = sched_hmp_proc_update_handler,
-		.extra1		= &zero,
-		.extra2		= &one_hundred,
-	},
-	{
-		.procname	= "sched_big_waker_task_load",
-		.data		= &sysctl_sched_big_waker_task_load_pct,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler   = sched_hmp_proc_update_handler,
-		.extra1		= &zero,
-		.extra2		= &one_hundred,
-	},
-	{
-		.procname	= "sched_prefer_sync_wakee_to_waker",
-		.data		= &sysctl_sched_prefer_sync_wakee_to_waker,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
-		.extra2		= &one,
-	},
-	{
-		.procname       = "sched_enable_thread_grouping",
-		.data           = &sysctl_sched_enable_thread_grouping,
-		.maxlen         = sizeof(unsigned int),
-		.mode           = 0644,
-		.proc_handler   = proc_dointvec,
-	},
-	{
-		.procname	= "sched_pred_alert_freq",
-		.data		= &sysctl_sched_pred_alert_freq,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero,
-	},
-	{
-		.procname       = "sched_freq_aggregate",
-		.data           = &sysctl_sched_freq_aggregate,
-		.maxlen         = sizeof(unsigned int),
-		.mode           = 0644,
-		.proc_handler   = sched_window_update_handler,
-	},
-	{
-		.procname	= "sched_freq_aggregate_threshold",
-		.data		= &sysctl_sched_freq_aggregate_threshold_pct,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_hmp_proc_update_handler,
-		.extra1		= &zero,
-		/*
-		 * Special handling for sched_freq_aggregate_threshold_pct
-		 * which can be greater than 100. Use 1000 as an upper bound
-		 * value which works for all practical use cases.
-		 */
-		.extra2		= &one_thousand,
-	},
-	{
-		.procname	= "sched_short_burst_ns",
-		.data		= &sysctl_sched_short_burst,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname       = "sched_short_sleep_ns",
-		.data           = &sysctl_sched_short_sleep,
-		.maxlen         = sizeof(unsigned int),
-		.mode           = 0644,
-		.proc_handler   = proc_dointvec,
-	},
-#endif	/* CONFIG_SCHED_HMP */
 #ifdef CONFIG_SCHED_DEBUG
 	{
 		.procname	= "sched_min_granularity_ns",