Merge "drivers: soc: qcom: snapshot of idle/sleep driver as of msm-4.14"
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 9d7176c..ca1b079 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -29,3 +29,4 @@
 # POWERPC drivers
 obj-$(CONFIG_PSERIES_CPUIDLE)		+= cpuidle-pseries.o
 obj-$(CONFIG_POWERNV_CPUIDLE)		+= cpuidle-powernv.o
+obj-$(CONFIG_MSM_PM) 			+= lpm-levels.o lpm-levels-of.o
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
new file mode 100644
index 0000000..97bf753
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -0,0 +1,778 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
+#include "lpm-levels.h"
+
+enum lpm_type {
+	IDLE = 0,
+	SUSPEND,
+	LATENCY,
+	LPM_TYPE_NR
+};
+
+struct lpm_type_str {
+	enum lpm_type type;
+	char *str;
+};
+
+static const struct lpm_type_str lpm_types[] = {
+	{IDLE, "idle_enabled"},
+	{SUSPEND, "suspend_enabled"},
+	{LATENCY, "exit_latency_us"},
+};
+
+static struct lpm_level_avail *cpu_level_available[NR_CPUS];
+static struct platform_device *lpm_pdev;
+
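+/*
+ * Each low power level is exposed to userspace as a kobject with three
+ * attributes: idle_enabled and suspend_enabled (writable booleans) and
+ * exit_latency_us (read-only). The exact sysfs location depends on the
+ * parent kobject handed in from lpm-levels.c; a typical target ends up
+ * with nodes such as
+ *	.../<cluster>/<level>/idle_enabled
+ *	.../<cluster>/cpuN/<level>/suspend_enabled
+ * (illustrative paths, not guaranteed by this file alone).
+ */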
+static void *get_enabled_ptr(struct kobj_attribute *attr,
+					struct lpm_level_avail *avail)
+{
+	void *arg = NULL;
+
+	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+		arg = (void *) &avail->idle_enabled;
+	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+		arg = (void *) &avail->suspend_enabled;
+
+	return arg;
+}
+
+static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj,
+					struct kobj_attribute *attr)
+{
+	struct lpm_level_avail *avail = NULL;
+
+	if (!strcmp(attr->attr.name, lpm_types[IDLE].str))
+		avail = container_of(attr, struct lpm_level_avail,
+					idle_enabled_attr);
+	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
+		avail = container_of(attr, struct lpm_level_avail,
+					suspend_enabled_attr);
+	else if (!strcmp(attr->attr.name, lpm_types[LATENCY].str))
+		avail = container_of(attr, struct lpm_level_avail,
+					latency_attr);
+
+	return avail;
+}
+
+static ssize_t lpm_latency_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int ret = 0;
+	struct kernel_param kp;
+	struct lpm_level_avail *avail = get_avail_ptr(kobj, attr);
+
+	if (WARN_ON(!avail))
+		return -EINVAL;
+
+	kp.arg = &avail->exit_latency;
+
+	ret = param_get_uint(buf, &kp);
+	if (ret > 0) {
+		strlcat(buf, "\n", PAGE_SIZE);
+		ret++;
+	}
+
+	return ret;
+}
+
+ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
+				char *buf)
+{
+	int ret = 0;
+	struct kernel_param kp;
+	struct lpm_level_avail *avail = get_avail_ptr(kobj, attr);
+
+	if (WARN_ON(!avail))
+		return -EINVAL;
+
+	kp.arg = get_enabled_ptr(attr, avail);
+	if (WARN_ON(!kp.arg))
+		return -EINVAL;
+
+	ret = param_get_bool(buf, &kp);
+	if (ret > 0) {
+		strlcat(buf, "\n", PAGE_SIZE);
+		ret++;
+	}
+
+	return ret;
+}
+
+ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr,
+				const char *buf, size_t len)
+{
+	int ret = 0;
+	struct kernel_param kp;
+	struct lpm_level_avail *avail;
+
+	avail = get_avail_ptr(kobj, attr);
+	if (WARN_ON(!avail))
+		return -EINVAL;
+
+	kp.arg = get_enabled_ptr(attr, avail);
+	ret = param_set_bool(buf, &kp);
+
+	return ret ? ret : len;
+}
+
+static int create_lvl_avail_nodes(const char *name,
+			struct kobject *parent, struct lpm_level_avail *avail,
+			void *data, int index, bool cpu_node)
+{
+	struct attribute_group *attr_group = NULL;
+	struct attribute **attr = NULL;
+	struct kobject *kobj = NULL;
+	int ret = 0;
+
+	kobj = kobject_create_and_add(name, parent);
+	if (!kobj)
+		return -ENOMEM;
+
+	attr_group = devm_kzalloc(&lpm_pdev->dev, sizeof(*attr_group),
+					GFP_KERNEL);
+	if (!attr_group) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	attr = devm_kzalloc(&lpm_pdev->dev,
+		sizeof(*attr) * (LPM_TYPE_NR + 1), GFP_KERNEL);
+	if (!attr) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	sysfs_attr_init(&avail->idle_enabled_attr.attr);
+	avail->idle_enabled_attr.attr.name = lpm_types[IDLE].str;
+	avail->idle_enabled_attr.attr.mode = 0644;
+	avail->idle_enabled_attr.show = lpm_enable_show;
+	avail->idle_enabled_attr.store = lpm_enable_store;
+
+	sysfs_attr_init(&avail->suspend_enabled_attr.attr);
+	avail->suspend_enabled_attr.attr.name = lpm_types[SUSPEND].str;
+	avail->suspend_enabled_attr.attr.mode = 0644;
+	avail->suspend_enabled_attr.show = lpm_enable_show;
+	avail->suspend_enabled_attr.store = lpm_enable_store;
+
+	sysfs_attr_init(&avail->latency_attr.attr);
+	avail->latency_attr.attr.name = lpm_types[LATENCY].str;
+	avail->latency_attr.attr.mode = 0444;
+	avail->latency_attr.show = lpm_latency_show;
+	avail->latency_attr.store = NULL;
+
+	attr[0] = &avail->idle_enabled_attr.attr;
+	attr[1] = &avail->suspend_enabled_attr.attr;
+	attr[2] = &avail->latency_attr.attr;
+	attr[3] = NULL;
+	attr_group->attrs = attr;
+
+	ret = sysfs_create_group(kobj, attr_group);
+	if (ret) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	avail->idle_enabled = true;
+	avail->suspend_enabled = true;
+	avail->kobj = kobj;
+	avail->data = data;
+	avail->idx = index;
+	avail->cpu_node = cpu_node;
+
+	return ret;
+
+failed:
+	kobject_put(kobj);
+	return ret;
+}
+
+static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent)
+{
+	int cpu;
+	int i, cpu_idx;
+	struct kobject **cpu_kobj = NULL;
+	struct lpm_level_avail *level_list = NULL;
+	char cpu_name[20] = {0};
+	int ret = 0;
+	struct list_head *pos;
+
+	cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
+			cpumask_weight(&p->child_cpus), GFP_KERNEL);
+	if (!cpu_kobj)
+		return -ENOMEM;
+
+	cpu_idx = 0;
+	list_for_each(pos, &p->cpu) {
+		struct lpm_cpu *lpm_cpu = list_entry(pos, struct lpm_cpu, list);
+
+		for_each_cpu(cpu, &lpm_cpu->related_cpus) {
+			snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
+			cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name,
+					parent);
+			if (!cpu_kobj[cpu_idx]) {
+				ret = -ENOMEM;
+				goto release_kobj;
+			}
+
+			level_list = devm_kzalloc(&lpm_pdev->dev,
+					lpm_cpu->nlevels * sizeof(*level_list),
+					GFP_KERNEL);
+			if (!level_list) {
+				ret = -ENOMEM;
+				goto release_kobj;
+			}
+
+			/*
+			 * Skip enable/disable for WFI. cpuidle expects WFI to
+			 * be available at all times.
+			 */
+			for (i = 1; i < lpm_cpu->nlevels; i++) {
+				level_list[i].exit_latency =
+					p->levels[i].pwr.exit_latency;
+				ret = create_lvl_avail_nodes(
+						lpm_cpu->levels[i].name,
+						cpu_kobj[cpu_idx],
+						&level_list[i],
+						(void *)lpm_cpu, cpu, true);
+				if (ret)
+					goto release_kobj;
+			}
+
+			cpu_level_available[cpu] = level_list;
+			cpu_idx++;
+		}
+	}
+
+	return ret;
+
+release_kobj:
+	for (i = 0; i < cpumask_weight(&p->child_cpus); i++)
+		kobject_put(cpu_kobj[i]);
+
+	return ret;
+}
+
+int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj)
+{
+	int ret = 0;
+	struct lpm_cluster *child = NULL;
+	int i;
+	struct kobject *cluster_kobj = NULL;
+
+	if (!p)
+		return -ENODEV;
+
+	cluster_kobj = kobject_create_and_add(p->cluster_name, kobj);
+	if (!cluster_kobj)
+		return -ENOMEM;
+
+	for (i = 0; i < p->nlevels; i++) {
+		p->levels[i].available.exit_latency =
+					p->levels[i].pwr.exit_latency;
+		ret = create_lvl_avail_nodes(p->levels[i].level_name,
+				cluster_kobj, &p->levels[i].available,
+				(void *)p, 0, false);
+		if (ret)
+			return ret;
+	}
+
+	list_for_each_entry(child, &p->child, list) {
+		ret = create_cluster_lvl_nodes(child, cluster_kobj);
+		if (ret)
+			return ret;
+	}
+
+	if (!list_empty(&p->cpu)) {
+		ret = create_cpu_lvl_nodes(p, cluster_kobj);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+int lpm_cpu_mode_allow(unsigned int cpu,
+		unsigned int index, bool from_idle)
+{
+	struct lpm_level_avail *avail = cpu_level_available[cpu];
+
+	if (lpm_pdev && !index)
+		return 1;
+
+	if (!lpm_pdev || !avail)
+		return !from_idle;
+
+	return !!(from_idle ? avail[index].idle_enabled :
+				avail[index].suspend_enabled);
+}
+
+bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
+		unsigned int mode, bool from_idle)
+{
+	struct lpm_level_avail *avail = &cluster->levels[mode].available;
+
+	if (!lpm_pdev || !avail)
+		return false;
+
+	return !!(from_idle ? avail->idle_enabled :
+				avail->suspend_enabled);
+}
+
+static int parse_cluster_params(struct device_node *node,
+		struct lpm_cluster *c)
+{
+	char *key;
+	int ret;
+
+	key = "label";
+	ret = of_property_read_string(node, key, &c->cluster_name);
+	if (ret)
+		goto fail;
+
+	key = "qcom,psci-mode-shift";
+	ret = of_property_read_u32(node, key, &c->psci_mode_shift);
+	if (ret)
+		goto fail;
+
+	key = "qcom,psci-mode-mask";
+	ret = of_property_read_u32(node, key, &c->psci_mode_mask);
+	if (ret)
+		goto fail;
+
+	key = "qcom,disable-prediction";
+	c->lpm_prediction = !(of_property_read_bool(node, key));
+
+	if (c->lpm_prediction) {
+		key = "qcom,clstr-tmr-add";
+		ret = of_property_read_u32(node, key, &c->tmr_add);
+		if (ret || c->tmr_add < TIMER_ADD_LOW ||
+					c->tmr_add > TIMER_ADD_HIGH)
+			c->tmr_add = DEFAULT_TIMER_ADD;
+	}
+
+	/* Set default_level to 0 as default */
+	c->default_level = 0;
+
+	return 0;
+fail:
+	pr_err("Failed to read key: %s ret: %d\n", key, ret);
+
+	return ret;
+}
+
+static int parse_power_params(struct device_node *node,
+		struct power_params *pwr)
+{
+	char *key;
+	int ret;
+
+	key = "qcom,entry-latency-us";
+	ret  = of_property_read_u32(node, key, &pwr->entry_latency);
+	if (ret)
+		goto fail;
+
+	key = "qcom,exit-latency-us";
+	ret  = of_property_read_u32(node, key, &pwr->exit_latency);
+	if (ret)
+		goto fail;
+
+	key = "qcom,min-residency-us";
+	ret = of_property_read_u32(node, key, &pwr->min_residency);
+	if (ret)
+		goto fail;
+
+	return ret;
+fail:
+	pr_err("Failed to read key: %s node: %s\n", key, node->name);
+
+	return ret;
+}
+
+static int parse_cluster_level(struct device_node *node,
+		struct lpm_cluster *cluster)
+{
+	struct lpm_cluster_level *level = &cluster->levels[cluster->nlevels];
+	int ret = -ENOMEM;
+	char *key;
+
+	key = "label";
+	ret = of_property_read_string(node, key, &level->level_name);
+	if (ret)
+		goto failed;
+
+	key = "qcom,psci-mode";
+	ret = of_property_read_u32(node, key, &level->psci_id);
+	if (ret)
+		goto failed;
+
+	level->is_reset = of_property_read_bool(node, "qcom,is-reset");
+
+	if (cluster->nlevels != cluster->default_level) {
+		key = "qcom,min-child-idx";
+		ret = of_property_read_u32(node, key, &level->min_child_level);
+		if (ret)
+			goto failed;
+
+		if (cluster->min_child_level > level->min_child_level)
+			cluster->min_child_level = level->min_child_level;
+	}
+
+	level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");
+
+	key = "parse_power_params";
+	ret = parse_power_params(node, &level->pwr);
+	if (ret)
+		goto failed;
+
+	key = "qcom,reset-level";
+	ret = of_property_read_u32(node, key, &level->reset_level);
+	if (ret == -EINVAL)
+		level->reset_level = LPM_RESET_LVL_NONE;
+	else if (ret)
+		goto failed;
+
+	cluster->nlevels++;
+
+	return 0;
+failed:
+	pr_err("Failed to read key: %s ret: %d\n", key, ret);
+
+	return ret;
+}
+
+static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l)
+{
+	char *key;
+	int ret;
+
+	key = "label";
+	ret = of_property_read_string(n, key, &l->name);
+	if (ret)
+		goto fail;
+
+	key = "qcom,psci-cpu-mode";
+	ret = of_property_read_u32(n, key, &l->psci_id);
+	if (ret)
+		goto fail;
+
+	return ret;
+fail:
+	pr_err("Failed to read key: %s level: %s\n", key, l->name);
+
+	return ret;
+}
+
+static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask)
+{
+	struct device_node *cpu_node;
+	int cpu;
+	int idx = 0;
+
+	cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
+	if (!cpu_node) {
+		pr_info("%s: No CPU phandle, assuming single cluster\n",
+				node->full_name);
+		/*
+		 * Not all targets have the cpu node populated in the device
+		 * tree. If the cpu node is not populated, assume all possible
+		 * CPUs belong to this cluster.
+		 */
+		cpumask_copy(mask, cpu_possible_mask);
+		return 0;
+	}
+
+	while (cpu_node) {
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+				cpumask_set_cpu(cpu, mask);
+				break;
+			}
+		}
+		of_node_put(cpu_node);
+		cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
+	}
+
+	return 0;
+}
+
+static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu)
+{
+	struct device_node *n;
+	int ret, i;
+	const char *key;
+
+	for_each_child_of_node(node, n) {
+		struct lpm_cpu_level *l = &cpu->levels[cpu->nlevels];
+
+		cpu->nlevels++;
+
+		ret = parse_cpu_mode(n, l);
+		if (ret) {
+			of_node_put(n);
+			return ret;
+		}
+
+		ret = parse_power_params(n, &l->pwr);
+		if (ret) {
+			of_node_put(n);
+			return ret;
+		}
+
+		key = "qcom,use-broadcast-timer";
+		l->use_bc_timer = of_property_read_bool(n, key);
+
+		key = "qcom,is-reset";
+		l->is_reset = of_property_read_bool(n, key);
+
+		key = "qcom,reset-level";
+		ret = of_property_read_u32(n, key, &l->reset_level);
+		of_node_put(n);
+
+		if (ret == -EINVAL)
+			l->reset_level = LPM_RESET_LVL_NONE;
+		else if (ret)
+			return ret;
+	}
+
+	for (i = 1; i < cpu->nlevels; i++)
+		cpu->levels[i-1].pwr.max_residency =
+			cpu->levels[i].pwr.min_residency - 1;
+
+	cpu->levels[i-1].pwr.max_residency = UINT_MAX;
+
+	return 0;
+}
+
+static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
+{
+	int ret;
+	char *key;
+	struct lpm_cpu *cpu;
+
+	cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu), GFP_KERNEL);
+	if (!cpu)
+		return -ENOMEM;
+
+	if (get_cpumask_for_node(node, &cpu->related_cpus))
+		return -EINVAL;
+
+	cpu->parent = c;
+
+	key = "qcom,psci-mode-shift";
+	ret = of_property_read_u32(node, key, &cpu->psci_mode_shift);
+	if (ret)
+		goto failed;
+
+	key = "qcom,psci-mode-mask";
+	ret = of_property_read_u32(node, key, &cpu->psci_mode_mask);
+	if (ret)
+		goto failed;
+
+	key = "qcom,disable-prediction";
+	cpu->lpm_prediction = !(of_property_read_bool(node, key));
+
+	if (cpu->lpm_prediction) {
+		key = "qcom,ref-stddev";
+		ret = of_property_read_u32(node, key, &cpu->ref_stddev);
+		if (ret || cpu->ref_stddev < STDDEV_LOW ||
+					cpu->ref_stddev > STDDEV_HIGH)
+			cpu->ref_stddev = DEFAULT_STDDEV;
+
+		key = "qcom,tmr-add";
+		ret = of_property_read_u32(node, key, &cpu->tmr_add);
+		if (ret || cpu->tmr_add < TIMER_ADD_LOW ||
+					cpu->tmr_add > TIMER_ADD_HIGH)
+			cpu->tmr_add = DEFAULT_TIMER_ADD;
+
+		key = "qcom,ref-premature-cnt";
+		ret = of_property_read_u32(node, key, &cpu->ref_premature_cnt);
+		if (ret || cpu->ref_premature_cnt < PREMATURE_CNT_LOW ||
+				cpu->ref_premature_cnt > PREMATURE_CNT_HIGH)
+			cpu->ref_premature_cnt = DEFAULT_PREMATURE_CNT;
+	}
+
+	key = "parse_cpu";
+	ret = parse_cpu(node, cpu);
+	if (ret)
+		goto failed;
+
+	cpumask_or(&c->child_cpus, &c->child_cpus, &cpu->related_cpus);
+	list_add(&cpu->list, &c->cpu);
+
+	return ret;
+
+failed:
+	pr_err("Failed to read key: %s node: %s\n", key, node->name);
+	return ret;
+}
+
+void free_cluster_node(struct lpm_cluster *cluster)
+{
+	struct lpm_cpu *cpu, *n;
+	struct lpm_cluster *cl, *m;
+
+	list_for_each_entry_safe(cl, m, &cluster->child, list) {
+		list_del(&cl->list);
+		free_cluster_node(cl);
+	}
+
+	list_for_each_entry_safe(cpu, n, &cluster->cpu, list)
+		list_del(&cpu->list);
+}
+
+/*
+ * TODO:
+ * Expects only CPU or cluster child nodes. This ensures that the
+ * affinity level of a cluster stays consistent with respect to its
+ * child nodes.
+ */
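+/*
+ * Illustrative shape of the devicetree hierarchy this parser walks (node
+ * names under qcom,pm-cpu and the values shown here are examples, not
+ * taken from any particular target):
+ *
+ *	qcom,pm-cluster {
+ *		label = "system";
+ *		qcom,psci-mode-shift = <4>;
+ *		qcom,psci-mode-mask = <0xfff>;
+ *
+ *		qcom,pm-cluster-level {
+ *			label = "system-ret";
+ *			qcom,psci-mode = <0x1>;
+ *			qcom,entry-latency-us = <..>;
+ *			qcom,exit-latency-us = <..>;
+ *			qcom,min-residency-us = <..>;
+ *		};
+ *
+ *		qcom,pm-cpu {
+ *			qcom,psci-mode-shift = <0>;
+ *			qcom,psci-mode-mask = <0xf>;
+ *			(one child node per CPU level, each carrying label,
+ *			 qcom,psci-cpu-mode and the power params above)
+ *		};
+ *	};
+ */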
+struct lpm_cluster *parse_cluster(struct device_node *node,
+		struct lpm_cluster *parent)
+{
+	struct lpm_cluster *c;
+	struct device_node *n;
+	char *key;
+	int ret = 0, i;
+
+	c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	ret = parse_cluster_params(node, c);
+	if (ret)
+		return NULL;
+
+	INIT_LIST_HEAD(&c->child);
+	INIT_LIST_HEAD(&c->cpu);
+	c->parent = parent;
+	spin_lock_init(&c->sync_lock);
+	c->min_child_level = NR_LPM_LEVELS;
+
+	for_each_child_of_node(node, n) {
+		if (!n->name) {
+			of_node_put(n);
+			continue;
+		}
+
+		if (!of_node_cmp(n->name, "qcom,pm-cluster-level")) {
+			key = "qcom,pm-cluster-level";
+			if (parse_cluster_level(n, c))
+				goto failed_parse_cluster;
+		} else if (!of_node_cmp(n->name, "qcom,pm-cluster")) {
+			struct lpm_cluster *child;
+
+			key = "qcom,pm-cluster";
+			child = parse_cluster(n, c);
+			if (!child)
+				goto failed_parse_cluster;
+
+			list_add(&child->list, &c->child);
+			cpumask_or(&c->child_cpus, &c->child_cpus,
+					&child->child_cpus);
+			c->aff_level = child->aff_level + 1;
+		} else if (!of_node_cmp(n->name, "qcom,pm-cpu")) {
+			key = "qcom,pm-cpu";
+			if (parse_cpu_levels(n, c))
+				goto failed_parse_cluster;
+
+			c->aff_level = 1;
+		}
+
+		of_node_put(n);
+	}
+
+	if (cpumask_intersects(&c->child_cpus, cpu_online_mask))
+		c->last_level = c->default_level;
+	else
+		c->last_level = c->nlevels-1;
+
+	for (i = 1; i < c->nlevels; i++)
+		c->levels[i-1].pwr.max_residency =
+			c->levels[i].pwr.min_residency - 1;
+
+	c->levels[i-1].pwr.max_residency = UINT_MAX;
+
+	return c;
+
+failed_parse_cluster:
+	pr_err("Failed parse cluster:%s\n", key);
+	of_node_put(n);
+	if (parent)
+		list_del(&c->list);
+	free_cluster_node(c);
+	return NULL;
+}
+
+struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev)
+{
+	struct device_node *top = NULL;
+	struct lpm_cluster *c;
+
+	top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
+	if (!top) {
+		pr_err("Failed to find root node\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	lpm_pdev = pdev;
+	c = parse_cluster(top, NULL);
+	of_node_put(top);
+	return c;
+}
+
+void cluster_dt_walkthrough(struct lpm_cluster *cluster)
+{
+	struct list_head *list;
+	struct lpm_cpu *cpu;
+	int i, j;
+	static int id;
+	char str[10] = {0};
+
+	if (!cluster)
+		return;
+
+	for (i = 0; i < id; i++)
+		snprintf(str+i, 10 - i, "\t");
+	pr_info("%d\n", __LINE__);
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *l = &cluster->levels[i];
+
+		pr_info("cluster: %s \t level: %s\n", cluster->cluster_name,
+							l->level_name);
+	}
+
+	list_for_each_entry(cpu, &cluster->cpu, list) {
+		pr_info("%d\n", __LINE__);
+		for (j = 0; j < cpu->nlevels; j++)
+			pr_info("%s\tCPU level name: %s\n", str,
+						cpu->levels[j].name);
+	}
+
+	id++;
+
+	list_for_each(list, &cluster->child) {
+		struct lpm_cluster *n;
+
+		pr_info("%d\n", __LINE__);
+		n = list_entry(list, typeof(*n), list);
+		cluster_dt_walkthrough(n);
+	}
+	id--;
+}
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
new file mode 100644
index 0000000..ba3cb9a
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels.c
@@ -0,0 +1,1770 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
+ * Copyright (C) 2009 Intel Corporation
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/tick.h>
+#include <linux/suspend.h>
+#include <linux/pm_qos.h>
+#include <linux/of_platform.h>
+#include <linux/smp.h>
+#include <linux/dma-mapping.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpuhotplug.h>
+#include <linux/sched/clock.h>
+#include <soc/qcom/pm.h>
+#include <soc/qcom/event_timer.h>
+#include <soc/qcom/lpm_levels.h>
+#include <soc/qcom/lpm-stats.h>
+#include <soc/qcom/minidump.h>
+#include <asm/arch_timer.h>
+#include <asm/suspend.h>
+#include <asm/cpuidle.h>
+#include "lpm-levels.h"
+#include <trace/events/power.h>
+#include "../clk/clk.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_low_power.h>
+
+#define SCLK_HZ (32768)
+#define PSCI_POWER_STATE(reset) (reset << 30)
+#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
+#define BIAS_HYST (bias_hyst * NSEC_PER_MSEC)
+
+enum {
+	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
+	MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
+};
+
+enum debug_event {
+	CPU_ENTER,
+	CPU_EXIT,
+	CLUSTER_ENTER,
+	CLUSTER_EXIT,
+	CPU_HP_STARTING,
+	CPU_HP_DYING,
+};
+
+struct lpm_debug {
+	u64 time;
+	enum debug_event evt;
+	int cpu;
+	uint32_t arg1;
+	uint32_t arg2;
+	uint32_t arg3;
+	uint32_t arg4;
+};
+
+static struct system_pm_ops *sys_pm_ops;
+
+struct lpm_cluster *lpm_root_node;
+
+#define MAXSAMPLES 5
+
+static bool lpm_prediction = true;
+module_param_named(lpm_prediction, lpm_prediction, bool, 0664);
+
+static uint32_t bias_hyst;
+module_param_named(bias_hyst, bias_hyst, uint, 0664);
+
+struct lpm_history {
+	uint32_t resi[MAXSAMPLES];
+	int mode[MAXSAMPLES];
+	int nsamp;
+	uint32_t hptr;
+	uint32_t hinvalid;
+	uint32_t htmr_wkup;
+	int64_t stime;
+};
+
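+/*
+ * Per-CPU ring buffer of the last MAXSAMPLES idle residencies and the mode
+ * each was taken in. hptr is the next slot to write; htmr_wkup records that
+ * the last wakeup came from the history timer so that the following sample
+ * is accumulated into the previous slot rather than consuming a new one.
+ */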
+static DEFINE_PER_CPU(struct lpm_history, hist);
+
+static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
+static bool suspend_in_progress;
+static struct hrtimer lpm_hrtimer;
+static DEFINE_PER_CPU(struct hrtimer, histtimer);
+static struct lpm_debug *lpm_debug;
+static phys_addr_t lpm_debug_phys;
+static const int num_dbg_elements = 0x100;
+
+static void cluster_unprepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t time);
+static void cluster_prepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t time);
+
+static bool print_parsed_dt;
+module_param_named(print_parsed_dt, print_parsed_dt, bool, 0664);
+
+static bool sleep_disabled;
+module_param_named(sleep_disabled, sleep_disabled, bool, 0664);
+
+/**
+ * msm_cpuidle_get_deep_idle_latency - Get deep idle latency value
+ *
+ * Returns an s32 latency value
+ */
+s32 msm_cpuidle_get_deep_idle_latency(void)
+{
+	return 10;
+}
+EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency);
+
+uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops)
+{
+	if (sys_pm_ops)
+		return -EUSERS;
+
+	sys_pm_ops = pm_ops;
+
+	return 0;
+}
+
+static uint32_t least_cluster_latency(struct lpm_cluster *cluster,
+					struct latency_level *lat_level)
+{
+	struct list_head *list;
+	struct lpm_cluster_level *level;
+	struct lpm_cluster *n;
+	struct power_params *pwr_params;
+	uint32_t latency = 0;
+	int i;
+
+	if (list_empty(&cluster->list)) {
+		for (i = 0; i < cluster->nlevels; i++) {
+			level = &cluster->levels[i];
+			pwr_params = &level->pwr;
+			if (lat_level->reset_level == level->reset_level) {
+				if ((latency > pwr_params->exit_latency)
+						|| (!latency))
+					latency = pwr_params->exit_latency;
+				break;
+			}
+		}
+	} else {
+		list_for_each(list, &cluster->parent->child) {
+			n = list_entry(list, typeof(*n), list);
+			if (lat_level->level_name) {
+				if (strcmp(lat_level->level_name,
+						 n->cluster_name))
+					continue;
+			}
+			for (i = 0; i < n->nlevels; i++) {
+				level = &n->levels[i];
+				pwr_params = &level->pwr;
+				if (lat_level->reset_level ==
+						level->reset_level) {
+					if ((latency > pwr_params->exit_latency)
+								|| (!latency))
+						latency =
+						pwr_params->exit_latency;
+					break;
+				}
+			}
+		}
+	}
+	return latency;
+}
+
+static uint32_t least_cpu_latency(struct list_head *child,
+				struct latency_level *lat_level)
+{
+	struct list_head *list;
+	struct lpm_cpu_level *level;
+	struct power_params *pwr_params;
+	struct lpm_cpu *cpu;
+	struct lpm_cluster *n;
+	uint32_t lat = 0;
+	int i;
+
+	list_for_each(list, child) {
+		n = list_entry(list, typeof(*n), list);
+		if (lat_level->level_name) {
+			if (strcmp(lat_level->level_name, n->cluster_name))
+				continue;
+		}
+		list_for_each_entry(cpu, &n->cpu, list) {
+			for (i = 0; i < cpu->nlevels; i++) {
+				level = &cpu->levels[i];
+				pwr_params = &level->pwr;
+				if (lat_level->reset_level
+						== level->reset_level) {
+					if ((lat > pwr_params->exit_latency)
+							|| (!lat))
+						lat = pwr_params->exit_latency;
+					break;
+				}
+			}
+		}
+	}
+	return lat;
+}
+
+static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
+							int affinity_level)
+{
+	struct lpm_cluster *n;
+
+	if ((cluster->aff_level == affinity_level)
+		|| ((!list_empty(&cluster->cpu)) && (affinity_level == 0)))
+		return cluster;
+	else if (list_empty(&cluster->cpu)) {
+		n =  list_entry(cluster->child.next, typeof(*n), list);
+		return cluster_aff_match(n, affinity_level);
+	} else
+		return NULL;
+}
+
+int lpm_get_latency(struct latency_level *level, uint32_t *latency)
+{
+	struct lpm_cluster *cluster;
+	uint32_t val;
+
+	if (!lpm_root_node) {
+		pr_err("lpm_probe not completed\n");
+		return -EAGAIN;
+	}
+
+	if ((level->affinity_level < 0)
+		|| (level->affinity_level > lpm_root_node->aff_level)
+		|| (level->reset_level < LPM_RESET_LVL_RET)
+		|| (level->reset_level > LPM_RESET_LVL_PC)
+		|| !latency)
+		return -EINVAL;
+
+	cluster = cluster_aff_match(lpm_root_node, level->affinity_level);
+	if (!cluster) {
+		pr_err("No matching cluster found for affinity_level:%d\n",
+							level->affinity_level);
+		return -EINVAL;
+	}
+
+	if (level->affinity_level == 0)
+		val = least_cpu_latency(&cluster->parent->child, level);
+	else
+		val = least_cluster_latency(cluster, level);
+
+	if (!val) {
+		pr_err("No mode with affinity_level:%d reset_level:%d\n",
+				level->affinity_level, level->reset_level);
+		return -EINVAL;
+	}
+
+	*latency = val;
+
+	return 0;
+}
+EXPORT_SYMBOL(lpm_get_latency);
+
+static void update_debug_pc_event(enum debug_event event, uint32_t arg1,
+		uint32_t arg2, uint32_t arg3, uint32_t arg4)
+{
+	struct lpm_debug *dbg;
+	int idx;
+	static DEFINE_SPINLOCK(debug_lock);
+	static int pc_event_index;
+
+	if (!lpm_debug)
+		return;
+
+	spin_lock(&debug_lock);
+	idx = pc_event_index++;
+	dbg = &lpm_debug[idx & (num_dbg_elements - 1)];
+
+	dbg->evt = event;
+	dbg->time = arch_counter_get_cntvct();
+	dbg->cpu = raw_smp_processor_id();
+	dbg->arg1 = arg1;
+	dbg->arg2 = arg2;
+	dbg->arg3 = arg3;
+	dbg->arg4 = arg4;
+	spin_unlock(&debug_lock);
+}
+
+static int lpm_dying_cpu(unsigned int cpu)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
+
+	update_debug_pc_event(CPU_HP_DYING, cpu,
+				cluster->num_children_in_sync.bits[0],
+				cluster->child_cpus.bits[0], false);
+	cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
+	return 0;
+}
+
+static int lpm_starting_cpu(unsigned int cpu)
+{
+	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
+
+	update_debug_pc_event(CPU_HP_STARTING, cpu,
+				cluster->num_children_in_sync.bits[0],
+				cluster->child_cpus.bits[0], false);
+	cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
+	return 0;
+}
+
+static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
+{
+	return HRTIMER_NORESTART;
+}
+
+static void histtimer_cancel(void)
+{
+	unsigned int cpu = raw_smp_processor_id();
+	struct hrtimer *cpu_histtimer = &per_cpu(histtimer, cpu);
+
+	hrtimer_try_to_cancel(cpu_histtimer);
+}
+
+static enum hrtimer_restart histtimer_fn(struct hrtimer *h)
+{
+	int cpu = raw_smp_processor_id();
+	struct lpm_history *history = &per_cpu(hist, cpu);
+
+	history->hinvalid = 1;
+	return HRTIMER_NORESTART;
+}
+
+static void histtimer_start(uint32_t time_us)
+{
+	uint64_t time_ns = time_us * NSEC_PER_USEC;
+	ktime_t hist_ktime = ns_to_ktime(time_ns);
+	unsigned int cpu = raw_smp_processor_id();
+	struct hrtimer *cpu_histtimer = &per_cpu(histtimer, cpu);
+
+	cpu_histtimer->function = histtimer_fn;
+	hrtimer_start(cpu_histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
+}
+
+static void cluster_timer_init(struct lpm_cluster *cluster)
+{
+	struct list_head *list;
+
+	if (!cluster)
+		return;
+
+	hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+	list_for_each(list, &cluster->child) {
+		struct lpm_cluster *n;
+
+		n = list_entry(list, typeof(*n), list);
+		cluster_timer_init(n);
+	}
+}
+
+static void clusttimer_cancel(void)
+{
+	int cpu = raw_smp_processor_id();
+	struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
+
+	hrtimer_try_to_cancel(&cluster->histtimer);
+
+	if (cluster->parent)
+		hrtimer_try_to_cancel(&cluster->parent->histtimer);
+}
+
+static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
+{
+	struct lpm_cluster *cluster = container_of(h,
+				struct lpm_cluster, histtimer);
+
+	cluster->history.hinvalid = 1;
+	return HRTIMER_NORESTART;
+}
+
+static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
+{
+	uint64_t time_ns = time_us * NSEC_PER_USEC;
+	ktime_t clust_ktime = ns_to_ktime(time_ns);
+
+	cluster->histtimer.function = clusttimer_fn;
+	hrtimer_start(&cluster->histtimer, clust_ktime,
+				HRTIMER_MODE_REL_PINNED);
+}
+
+static void msm_pm_set_timer(uint32_t modified_time_us)
+{
+	u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
+	ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
+
+	lpm_hrtimer.function = lpm_hrtimer_cb;
+	hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
+}
+
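+/*
+ * Predict the upcoming sleep time from the per-CPU residency history: when
+ * the samples cluster tightly (standard deviation within ref_stddev, or
+ * small relative to the average), their average is returned as the
+ * prediction; otherwise a mode with repeated premature exits restricts the
+ * selectable levels through *idx_restrict and *idx_restrict_time.
+ * Returns 0 when no prediction is made.
+ */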
+static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
+		struct lpm_cpu *cpu, int *idx_restrict,
+		uint32_t *idx_restrict_time)
+{
+	int i, j, divisor;
+	uint64_t max, avg, stddev;
+	int64_t thresh = LLONG_MAX;
+	struct lpm_history *history = &per_cpu(hist, dev->cpu);
+
+	if (!lpm_prediction || !cpu->lpm_prediction)
+		return 0;
+
+	/*
+	 * Samples are marked invalid when the wakeup was due to the timer,
+	 * so do not predict.
+	 */
+	if (history->hinvalid) {
+		history->hinvalid = 0;
+		history->htmr_wkup = 1;
+		history->stime = 0;
+		return 0;
+	}
+
+	/*
+	 * Predict only when all the samples are collected.
+	 */
+	if (history->nsamp < MAXSAMPLES) {
+		history->stime = 0;
+		return 0;
+	}
+
+	/*
+	 * If the samples do not deviate much, use their average as the
+	 * predicted sleep time. Otherwise, if any specific mode shows too
+	 * many premature exits, return the index of that mode.
+	 */
+
+again:
+	max = avg = divisor = stddev = 0;
+	for (i = 0; i < MAXSAMPLES; i++) {
+		int64_t value = history->resi[i];
+
+		if (value <= thresh) {
+			avg += value;
+			divisor++;
+			if (value > max)
+				max = value;
+		}
+	}
+	do_div(avg, divisor);
+
+	for (i = 0; i < MAXSAMPLES; i++) {
+		int64_t value = history->resi[i];
+
+		if (value <= thresh) {
+			int64_t diff = value - avg;
+
+			stddev += diff * diff;
+		}
+	}
+	do_div(stddev, divisor);
+	stddev = int_sqrt(stddev);
+
+	/*
+	 * If the deviation is small, return the average; otherwise drop
+	 * the maximum sample and retry.
+	 */
+	if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
+					|| stddev <= cpu->ref_stddev) {
+		history->stime = ktime_to_us(ktime_get()) + avg;
+		return avg;
+	} else if (divisor  > (MAXSAMPLES - 1)) {
+		thresh = max - 1;
+		goto again;
+	}
+
+	/*
+	 * Count the premature exits for each mode, excluding the
+	 * clock-gating mode; if there are too many, restrict that mode
+	 * and the deeper ones.
+	 */
+	if (history->htmr_wkup != 1) {
+		for (j = 1; j < cpu->nlevels; j++) {
+			struct lpm_cpu_level *level = &cpu->levels[j];
+			uint32_t min_residency = level->pwr.min_residency;
+			uint32_t max_residency = 0;
+			struct lpm_cpu_level *lvl;
+			uint32_t failed = 0;
+			uint64_t total = 0;
+
+			for (i = 0; i < MAXSAMPLES; i++) {
+				if ((history->mode[i] == j) &&
+					(history->resi[i] < min_residency)) {
+					failed++;
+					total += history->resi[i];
+				}
+			}
+			if (failed >= cpu->ref_premature_cnt) {
+				*idx_restrict = j;
+				do_div(total, failed);
+				for (i = 0; i < j; i++) {
+					lvl = &cpu->levels[i];
+					max_residency = lvl->pwr.max_residency;
+					if (total < max_residency) {
+						*idx_restrict = i + 1;
+						total = max_residency;
+						break;
+					}
+				}
+
+				*idx_restrict_time = total;
+				history->stime = ktime_to_us(ktime_get())
+						+ *idx_restrict_time;
+				break;
+			}
+		}
+	}
+	return 0;
+}
+
+static inline void invalidate_predict_history(struct cpuidle_device *dev)
+{
+	struct lpm_history *history = &per_cpu(hist, dev->cpu);
+	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, dev->cpu);
+
+	if (!lpm_prediction || !lpm_cpu->lpm_prediction)
+		return;
+
+	if (history->hinvalid) {
+		history->hinvalid = 0;
+		history->htmr_wkup = 1;
+		history->stime = 0;
+	}
+}
+
+static void clear_predict_history(void)
+{
+	struct lpm_history *history;
+	int i;
+	unsigned int cpu;
+	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, raw_smp_processor_id());
+
+	if (!lpm_prediction || !lpm_cpu->lpm_prediction)
+		return;
+
+	for_each_possible_cpu(cpu) {
+		history = &per_cpu(hist, cpu);
+		for (i = 0; i < MAXSAMPLES; i++) {
+			history->resi[i]  = 0;
+			history->mode[i] = -1;
+			history->hptr = 0;
+			history->nsamp = 0;
+			history->stime = 0;
+		}
+	}
+}
+
+static void update_history(struct cpuidle_device *dev, int idx);
+
+static inline bool is_cpu_biased(int cpu)
+{
+	return false;
+}
+
+static inline bool lpm_disallowed(s64 sleep_us, int cpu)
+{
+	if (sleep_disabled || is_cpu_biased(cpu))
+		return true;
+
+	if (sleep_us < 0)
+		return true;
+
+	return false;
+}
+
+static int cpu_power_select(struct cpuidle_device *dev,
+		struct lpm_cpu *cpu)
+{
+	ktime_t delta_next;
+	int best_level = 0;
+	uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY,
+							dev->cpu);
+	s64 sleep_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
+	uint32_t modified_time_us = 0;
+	uint32_t next_event_us = 0;
+	int i, idx_restrict;
+	uint32_t lvl_latency_us = 0;
+	uint64_t predicted = 0;
+	uint32_t htime = 0, idx_restrict_time = 0;
+	uint32_t next_wakeup_us = (uint32_t)sleep_us;
+	uint32_t min_residency, max_residency;
+	struct power_params *pwr_params;
+
+	if (lpm_disallowed(sleep_us, dev->cpu))
+		goto done_select;
+
+	idx_restrict = cpu->nlevels + 1;
+	next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu)));
+
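+	/*
+	 * Walk the levels from shallowest to deepest, remembering the
+	 * deepest level whose exit latency fits the PM QoS request, and
+	 * stop once the expected (or predicted) sleep time falls within
+	 * the level's residency window.
+	 */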
+	for (i = 0; i < cpu->nlevels; i++) {
+		if (!lpm_cpu_mode_allow(dev->cpu, i, true))
+			continue;
+
+		pwr_params = &cpu->levels[i].pwr;
+		lvl_latency_us = pwr_params->exit_latency;
+		min_residency = pwr_params->min_residency;
+		max_residency = pwr_params->max_residency;
+
+		if (latency_us < lvl_latency_us)
+			break;
+
+		if (next_event_us) {
+			if (next_event_us < lvl_latency_us)
+				break;
+
+			if (next_event_us < sleep_us)
+				next_wakeup_us = next_event_us - lvl_latency_us;
+		}
+
+		if (!i) {
+			/*
+			 * If next_wakeup_us itself is not sufficient for
+			 * low power modes deeper than clock gating, do not
+			 * invoke prediction.
+			 */
+			if (next_wakeup_us > max_residency) {
+				predicted = lpm_cpuidle_predict(dev, cpu,
+					&idx_restrict, &idx_restrict_time);
+				if (predicted && (predicted < min_residency))
+					predicted = min_residency;
+			} else
+				invalidate_predict_history(dev);
+		}
+
+		if (i >= idx_restrict)
+			break;
+
+		best_level = i;
+
+		if (next_event_us && next_event_us < sleep_us && !i)
+			modified_time_us = next_event_us - lvl_latency_us;
+		else
+			modified_time_us = 0;
+
+		if (predicted ? (predicted <= max_residency)
+			: (next_wakeup_us <= max_residency))
+			break;
+	}
+
+	if (modified_time_us)
+		msm_pm_set_timer(modified_time_us);
+
+	/*
+	 * Start a timer to avoid staying in a shallower mode forever in
+	 * case of misprediction.
+	 */
+
+	pwr_params = &cpu->levels[best_level].pwr;
+	min_residency = pwr_params->min_residency;
+	max_residency = pwr_params->max_residency;
+
+	if ((predicted || (idx_restrict != cpu->nlevels + 1)) &&
+	    (best_level < (cpu->nlevels-1))) {
+		htime = predicted + cpu->tmr_add;
+		if (htime == cpu->tmr_add)
+			htime = idx_restrict_time;
+		else if (htime > max_residency)
+			htime = max_residency;
+
+		if ((next_wakeup_us > htime) &&
+			((next_wakeup_us - htime) > max_residency))
+			histtimer_start(htime);
+	}
+
+done_select:
+	trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us);
+
+	trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ? 1 : 0),
+			predicted, htime);
+
+	return best_level;
+}
+
+static unsigned int get_next_online_cpu(bool from_idle)
+{
+	unsigned int cpu;
+	ktime_t next_event;
+	unsigned int next_cpu = raw_smp_processor_id();
+
+	if (!from_idle)
+		return next_cpu;
+	next_event = KTIME_MAX;
+	for_each_online_cpu(cpu) {
+		ktime_t *next_event_c;
+
+		next_event_c = get_next_event_cpu(cpu);
+		if (*next_event_c < next_event) {
+			next_event = *next_event_c;
+			next_cpu = cpu;
+		}
+	}
+	return next_cpu;
+}
+
+static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
+		bool from_idle, uint32_t *pred_time)
+{
+	int cpu;
+	ktime_t next_event;
+	struct cpumask online_cpus_in_cluster;
+	struct lpm_history *history;
+	int64_t prediction = LONG_MAX;
+
+	if (!from_idle)
+		return ~0ULL;
+
+	next_event = KTIME_MAX;
+	cpumask_and(&online_cpus_in_cluster,
+			&cluster->num_children_in_sync, cpu_online_mask);
+
+	for_each_cpu(cpu, &online_cpus_in_cluster) {
+		ktime_t *next_event_c;
+
+		next_event_c = get_next_event_cpu(cpu);
+		if (*next_event_c < next_event)
+			next_event = *next_event_c;
+
+		if (from_idle && lpm_prediction && cluster->lpm_prediction) {
+			history = &per_cpu(hist, cpu);
+			if (history->stime && (history->stime < prediction))
+				prediction = history->stime;
+		}
+	}
+
+	if (from_idle && lpm_prediction && cluster->lpm_prediction) {
+		if (prediction > ktime_to_us(ktime_get()))
+			*pred_time = prediction - ktime_to_us(ktime_get());
+	}
+
+	if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
+		return ktime_to_us(ktime_sub(next_event, ktime_get()));
+	else
+		return 0;
+}
+
+static int cluster_predict(struct lpm_cluster *cluster,
+				uint32_t *pred_us)
+{
+	int i, j;
+	int ret = 0;
+	struct cluster_history *history = &cluster->history;
+	int64_t cur_time = ktime_to_us(ktime_get());
+
+	if (!lpm_prediction || !cluster->lpm_prediction)
+		return 0;
+
+	if (history->hinvalid) {
+		history->hinvalid = 0;
+		history->htmr_wkup = 1;
+		history->flag = 0;
+		return ret;
+	}
+
+	if (history->nsamp == MAXSAMPLES) {
+		for (i = 0; i < MAXSAMPLES; i++) {
+			if ((cur_time - history->stime[i])
+					> CLUST_SMPL_INVLD_TIME)
+				history->nsamp--;
+		}
+	}
+
+	if (history->nsamp < MAXSAMPLES) {
+		history->flag = 0;
+		return ret;
+	}
+
+	if (history->flag == 2)
+		history->flag = 0;
+
+	if (history->htmr_wkup != 1) {
+		uint64_t total = 0;
+
+		if (history->flag == 1) {
+			for (i = 0; i < MAXSAMPLES; i++)
+				total += history->resi[i];
+			do_div(total, MAXSAMPLES);
+			*pred_us = total;
+			return 2;
+		}
+
+		for (j = 1; j < cluster->nlevels; j++) {
+			uint32_t failed = 0;
+
+			total = 0;
+			for (i = 0; i < MAXSAMPLES; i++) {
+				if ((history->mode[i] == j) && (history->resi[i]
+				< cluster->levels[j].pwr.min_residency)) {
+					failed++;
+					total += history->resi[i];
+				}
+			}
+
+			if (failed > (MAXSAMPLES-2)) {
+				do_div(total, failed);
+				*pred_us = total;
+				history->flag = 1;
+				return 1;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static void update_cluster_history_time(struct cluster_history *history,
+						int idx, uint64_t start)
+{
+	history->entry_idx = idx;
+	history->entry_time = start;
+}
+
+static void update_cluster_history(struct cluster_history *history, int idx)
+{
+	uint32_t tmr = 0;
+	uint32_t residency = 0;
+	struct lpm_cluster *cluster =
+			container_of(history, struct lpm_cluster, history);
+
+	if (!lpm_prediction || !cluster->lpm_prediction)
+		return;
+
+	if ((history->entry_idx == -1) || (history->entry_idx == idx)) {
+		residency = ktime_to_us(ktime_get()) - history->entry_time;
+		history->stime[history->hptr] = history->entry_time;
+	} else
+		return;
+
+	if (history->htmr_wkup) {
+		if (!history->hptr)
+			history->hptr = MAXSAMPLES-1;
+		else
+			history->hptr--;
+
+		history->resi[history->hptr] += residency;
+
+		history->htmr_wkup = 0;
+		tmr = 1;
+	} else
+		history->resi[history->hptr] = residency;
+
+	history->mode[history->hptr] = idx;
+
+	history->entry_idx = INT_MIN;
+	history->entry_time = 0;
+
+	if (history->nsamp < MAXSAMPLES)
+		history->nsamp++;
+
+	trace_cluster_pred_hist(cluster->cluster_name,
+		history->mode[history->hptr], history->resi[history->hptr],
+		history->hptr, tmr);
+
+	(history->hptr)++;
+
+	if (history->hptr >= MAXSAMPLES)
+		history->hptr = 0;
+}
+
+static void clear_cl_history_each(struct cluster_history *history)
+{
+	int i;
+
+	for (i = 0; i < MAXSAMPLES; i++) {
+		history->resi[i]  = 0;
+		history->mode[i] = -1;
+		history->stime[i] = 0;
+	}
+
+	history->hptr = 0;
+	history->nsamp = 0;
+	history->flag = 0;
+	history->hinvalid = 0;
+	history->htmr_wkup = 0;
+}
+
+static void clear_cl_predict_history(void)
+{
+	struct lpm_cluster *cluster = lpm_root_node;
+	struct list_head *list;
+
+	if (!lpm_prediction || !cluster->lpm_prediction)
+		return;
+
+	clear_cl_history_each(&cluster->history);
+
+	list_for_each(list, &cluster->child) {
+		struct lpm_cluster *n;
+
+		n = list_entry(list, typeof(*n), list);
+		clear_cl_history_each(&n->history);
+	}
+}
+
+static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
+							int *ispred)
+{
+	int best_level = -1;
+	int i;
+	struct cpumask mask;
+	uint32_t latency_us = ~0U;
+	uint32_t sleep_us;
+	uint32_t cpupred_us = 0, pred_us = 0;
+	int pred_mode = 0, predicted = 0;
+
+	if (!cluster)
+		return -EINVAL;
+
+	sleep_us = (uint32_t)get_cluster_sleep_time(cluster,
+						from_idle, &cpupred_us);
+
+	if (from_idle) {
+		pred_mode = cluster_predict(cluster, &pred_us);
+
+		if (cpupred_us && pred_mode && (cpupred_us < pred_us))
+			pred_us = cpupred_us;
+
+		if (pred_us && pred_mode && (pred_us < sleep_us))
+			predicted = 1;
+
+		if (predicted && (pred_us == cpupred_us))
+			predicted = 2;
+	}
+
+	if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
+		latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
+							&mask);
+
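+	/*
+	 * Pick the deepest cluster level for which every child CPU has
+	 * voted, that satisfies the aggregated PM QoS latency and whose
+	 * entry plus exit overhead fits in the projected cluster sleep
+	 * time. Levels that notify the RPM are skipped from idle while a
+	 * suspend is in progress or when system sleep is not allowed.
+	 */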
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *level = &cluster->levels[i];
+		struct power_params *pwr_params = &level->pwr;
+
+		if (!lpm_cluster_mode_allow(cluster, i, from_idle))
+			continue;
+
+		if (!cpumask_equal(&cluster->num_children_in_sync,
+					&level->num_cpu_votes))
+			continue;
+
+		if (from_idle && latency_us < pwr_params->exit_latency)
+			break;
+
+		if (sleep_us < (pwr_params->exit_latency +
+						pwr_params->entry_latency))
+			break;
+
+		if (suspend_in_progress && from_idle && level->notify_rpm)
+			continue;
+
+		if (level->notify_rpm) {
+			if (!(sys_pm_ops && sys_pm_ops->sleep_allowed))
+				continue;
+			if (!sys_pm_ops->sleep_allowed())
+				continue;
+		}
+
+		best_level = i;
+
+		if (from_idle &&
+			(predicted ? (pred_us <= pwr_params->max_residency)
+			: (sleep_us <= pwr_params->max_residency)))
+			break;
+	}
+
+	if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2))
+		cluster->history.flag = 2;
+
+	*ispred = predicted;
+
+	trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us,
+						latency_us, predicted, pred_us);
+
+	return best_level;
+}
+
+static int cluster_configure(struct lpm_cluster *cluster, int idx,
+		bool from_idle, int predicted)
+{
+	struct lpm_cluster_level *level = &cluster->levels[idx];
+	struct cpumask online_cpus, cpumask;
+	unsigned int cpu;
+
+	cpumask_and(&online_cpus, &cluster->num_children_in_sync,
+					cpu_online_mask);
+
+	if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
+				|| is_IPI_pending(&online_cpus))
+		return -EPERM;
+
+	if (idx != cluster->default_level) {
+		update_debug_pc_event(CLUSTER_ENTER, idx,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+		trace_cluster_enter(cluster->cluster_name, idx,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+		lpm_stats_cluster_enter(cluster->stats, idx);
+
+		if (from_idle && lpm_prediction && cluster->lpm_prediction)
+			update_cluster_history_time(&cluster->history, idx,
+						ktime_to_us(ktime_get()));
+	}
+
+	if (level->notify_rpm) {
+		/*
+		 * Print the clocks that are still enabled during system
+		 * suspend. This debug information helps identify the clocks
+		 * that are preventing the system-level LPMs (XO and Vmin).
+		 */
+		if (!from_idle)
+			clock_debug_print_enabled();
+
+		cpu = get_next_online_cpu(from_idle);
+		cpumask_copy(&cpumask, cpumask_of(cpu));
+		clear_predict_history();
+		clear_cl_predict_history();
+		if (sys_pm_ops && sys_pm_ops->enter)
+			if ((sys_pm_ops->enter(&cpumask)))
+				return -EBUSY;
+	}
+
+	cluster->last_level = idx;
+
+	if (predicted && (idx < (cluster->nlevels - 1))) {
+		struct power_params *pwr_params = &cluster->levels[idx].pwr;
+
+		clusttimer_start(cluster, pwr_params->max_residency +
+							cluster->tmr_add);
+	}
+
+	return 0;
+}
+
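+/*
+ * Last-core-in/first-core-out synchronization: each CPU going idle ORs its
+ * mask into num_children_in_sync under sync_lock. Only when that mask equals
+ * child_cpus (the last core is going down) is a cluster level selected and
+ * configured; cluster_unprepare() removes the vote on the way out.
+ */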
+static void cluster_prepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t start_time)
+{
+	int i;
+	int predicted = 0;
+
+	if (!cluster)
+		return;
+
+	if (cluster->min_child_level > child_idx)
+		return;
+
+	spin_lock(&cluster->sync_lock);
+	cpumask_or(&cluster->num_children_in_sync, cpu,
+			&cluster->num_children_in_sync);
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *lvl = &cluster->levels[i];
+
+		if (child_idx >= lvl->min_child_level)
+			cpumask_or(&lvl->num_cpu_votes, cpu,
+					&lvl->num_cpu_votes);
+	}
+
+	/*
+	 * cluster_select() does not make any configuration changes, so it is
+	 * OK to release the lock here. If a core wakes up for a rude request,
+	 * it need not wait for another core to finish its cluster selection
+	 * and configuration.
+	 */
+
+	if (!cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus))
+		goto failed;
+
+	i = cluster_select(cluster, from_idle, &predicted);
+
+	if (((i < 0) || (i == cluster->default_level))
+				&& predicted && from_idle) {
+		update_cluster_history_time(&cluster->history,
+					-1, ktime_to_us(ktime_get()));
+
+		if (i < 0) {
+			struct power_params *pwr_params =
+						&cluster->levels[0].pwr;
+
+			clusttimer_start(cluster,
+					pwr_params->max_residency +
+					cluster->tmr_add);
+
+			goto failed;
+		}
+	}
+
+	if (i < 0)
+		goto failed;
+
+	if (cluster_configure(cluster, i, from_idle, predicted))
+		goto failed;
+
+	cluster->stats->sleep_time = start_time;
+	cluster_prepare(cluster->parent, &cluster->num_children_in_sync, i,
+			from_idle, start_time);
+
+	spin_unlock(&cluster->sync_lock);
+	return;
+failed:
+	spin_unlock(&cluster->sync_lock);
+	cluster->stats->sleep_time = 0;
+}
+
+static void cluster_unprepare(struct lpm_cluster *cluster,
+		const struct cpumask *cpu, int child_idx, bool from_idle,
+		int64_t end_time)
+{
+	struct lpm_cluster_level *level;
+	bool first_cpu;
+	int last_level, i;
+
+	if (!cluster)
+		return;
+
+	if (cluster->min_child_level > child_idx)
+		return;
+
+	spin_lock(&cluster->sync_lock);
+	last_level = cluster->default_level;
+	first_cpu = cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus);
+	cpumask_andnot(&cluster->num_children_in_sync,
+			&cluster->num_children_in_sync, cpu);
+
+	for (i = 0; i < cluster->nlevels; i++) {
+		struct lpm_cluster_level *lvl = &cluster->levels[i];
+
+		if (child_idx >= lvl->min_child_level)
+			cpumask_andnot(&lvl->num_cpu_votes,
+					&lvl->num_cpu_votes, cpu);
+	}
+
+	if (from_idle && first_cpu &&
+		(cluster->last_level == cluster->default_level))
+		update_cluster_history(&cluster->history, cluster->last_level);
+
+	if (!first_cpu || cluster->last_level == cluster->default_level)
+		goto unlock_return;
+
+	if (cluster->stats->sleep_time)
+		cluster->stats->sleep_time = end_time -
+			cluster->stats->sleep_time;
+	lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);
+
+	level = &cluster->levels[cluster->last_level];
+
+	if (level->notify_rpm)
+		if (sys_pm_ops && sys_pm_ops->exit)
+			sys_pm_ops->exit();
+
+	update_debug_pc_event(CLUSTER_EXIT, cluster->last_level,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+	trace_cluster_exit(cluster->cluster_name, cluster->last_level,
+			cluster->num_children_in_sync.bits[0],
+			cluster->child_cpus.bits[0], from_idle);
+
+	last_level = cluster->last_level;
+	cluster->last_level = cluster->default_level;
+
+	if (from_idle)
+		update_cluster_history(&cluster->history, last_level);
+
+	cluster_unprepare(cluster->parent, &cluster->child_cpus,
+			last_level, from_idle, end_time);
+unlock_return:
+	spin_unlock(&cluster->sync_lock);
+}
+
+static inline void cpu_prepare(struct lpm_cpu *cpu, int cpu_index,
+				bool from_idle)
+{
+	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
+
+	/*
+	 * Use the broadcast timer for aggregating the sleep mode within a
+	 * cluster. A broadcast timer could be used in the following scenarios:
+	 * 1) The architected timer HW gets reset during certain low power
+	 * modes and the core relies on an external (broadcast) timer to wake
+	 * up from sleep. This information is passed through the device tree.
+	 * 2) The CPU low power mode could trigger a system low power mode.
+	 * The low power module relies on the broadcast timer to aggregate the
+	 * next wakeup within a cluster, in which case the CPU switches over
+	 * to the broadcast timer.
+	 */
+
+	if (from_idle && cpu_level->is_reset)
+		cpu_pm_enter();
+
+}
+
+static inline void cpu_unprepare(struct lpm_cpu *cpu, int cpu_index,
+				bool from_idle)
+{
+	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
+
+	if (from_idle && cpu_level->is_reset)
+		cpu_pm_exit();
+}
+
+static int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl,
+				bool from_idle)
+{
+	int state_id = 0;
+
+	if (!cluster)
+		return 0;
+
+	spin_lock(&cluster->sync_lock);
+
+	if (!cpumask_equal(&cluster->num_children_in_sync,
+				&cluster->child_cpus))
+		goto unlock_and_return;
+
+	state_id += get_cluster_id(cluster->parent, aff_lvl, from_idle);
+
+	if (cluster->last_level != cluster->default_level) {
+		struct lpm_cluster_level *level
+			= &cluster->levels[cluster->last_level];
+
+		state_id += (level->psci_id & cluster->psci_mode_mask)
+					<< cluster->psci_mode_shift;
+
+		/*
+		 * We may have updated the broadcast timers; update the
+		 * wakeup value by reading the bc timer directly.
+		 */
+		if (level->notify_rpm)
+			if (sys_pm_ops && sys_pm_ops->update_wakeup)
+				sys_pm_ops->update_wakeup(from_idle);
+		if (cluster->psci_mode_shift)
+			(*aff_lvl)++;
+	}
+unlock_and_return:
+	spin_unlock(&cluster->sync_lock);
+	return state_id;
+}
+
+static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
+{
+	int affinity_level = 0, state_id = 0, power_state = 0;
+	bool success = false;
+	/*
+	 * idx = 0 is the default LPM state
+	 */
+
+	if (!idx) {
+		stop_critical_timings();
+		wfi();
+		start_critical_timings();
+		return true;
+	}
+
+	if (from_idle && cpu->levels[idx].use_bc_timer) {
+		if (tick_broadcast_enter())
+			return success;
+	}
+
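+	/*
+	 * Compose the PSCI power_state parameter: the CPU level's psci_id in
+	 * the low bits, each parent cluster's psci_id shifted into place by
+	 * its psci_mode_shift (via get_cluster_id()), the affinity level in
+	 * bits [25:24] and the power-down vs. retention type in bit 30.
+	 */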
+	state_id = get_cluster_id(cpu->parent, &affinity_level, from_idle);
+	power_state = PSCI_POWER_STATE(cpu->levels[idx].is_reset);
+	affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
+	state_id += power_state + affinity_level + cpu->levels[idx].psci_id;
+
+	update_debug_pc_event(CPU_ENTER, state_id,
+			0xdeaffeed, 0xdeaffeed, from_idle);
+	stop_critical_timings();
+
+	success = !arm_cpuidle_suspend(state_id);
+
+	start_critical_timings();
+	update_debug_pc_event(CPU_EXIT, state_id,
+			success, 0xdeaffeed, from_idle);
+
+	if (from_idle && cpu->levels[idx].use_bc_timer)
+		tick_broadcast_exit();
+
+	return success;
+}
+
+static int lpm_cpuidle_select(struct cpuidle_driver *drv,
+		struct cpuidle_device *dev, bool *stop_tick)
+{
+	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
+
+	if (!cpu)
+		return 0;
+
+	return cpu_power_select(dev, cpu);
+}
+
+static void update_history(struct cpuidle_device *dev, int idx)
+{
+	struct lpm_history *history = &per_cpu(hist, dev->cpu);
+	uint32_t tmr = 0;
+	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, dev->cpu);
+
+	if (!lpm_prediction || !lpm_cpu->lpm_prediction)
+		return;
+
+	if (history->htmr_wkup) {
+		if (!history->hptr)
+			history->hptr = MAXSAMPLES-1;
+		else
+			history->hptr--;
+
+		history->resi[history->hptr] += dev->last_residency;
+		history->htmr_wkup = 0;
+		tmr = 1;
+	} else
+		history->resi[history->hptr] = dev->last_residency;
+
+	history->mode[history->hptr] = idx;
+
+	trace_cpu_pred_hist(history->mode[history->hptr],
+		history->resi[history->hptr], history->hptr, tmr);
+
+	if (history->nsamp < MAXSAMPLES)
+		history->nsamp++;
+
+	(history->hptr)++;
+	if (history->hptr >= MAXSAMPLES)
+		history->hptr = 0;
+}
+
+static int lpm_cpuidle_enter(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int idx)
+{
+	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
+	bool success = false;
+	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
+	ktime_t start = ktime_get();
+	uint64_t start_time = ktime_to_ns(start), end_time;
+
+	cpu_prepare(cpu, idx, true);
+	cluster_prepare(cpu->parent, cpumask, idx, true, start_time);
+
+	trace_cpu_idle_enter(idx);
+	lpm_stats_cpu_enter(idx, start_time);
+
+	if (need_resched())
+		goto exit;
+
+	success = psci_enter_sleep(cpu, idx, true);
+
+exit:
+	end_time = ktime_to_ns(ktime_get());
+	lpm_stats_cpu_exit(idx, end_time, success);
+
+	cluster_unprepare(cpu->parent, cpumask, idx, true, end_time);
+	cpu_unprepare(cpu, idx, true);
+	dev->last_residency = ktime_us_delta(ktime_get(), start);
+	update_history(dev, idx);
+	trace_cpu_idle_exit(idx, success);
+	local_irq_enable();
+	if (lpm_prediction && cpu->lpm_prediction) {
+		histtimer_cancel();
+		clusttimer_cancel();
+	}
+	return idx;
+}
+
+static void lpm_cpuidle_s2idle(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int idx)
+{
+	struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
+	const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
+
+	for (; idx >= 0; idx--) {
+		if (lpm_cpu_mode_allow(dev->cpu, idx, false))
+			break;
+	}
+	if (idx < 0) {
+		pr_err("Failed suspend\n");
+		return;
+	}
+
+	cpu_prepare(cpu, idx, true);
+	cluster_prepare(cpu->parent, cpumask, idx, false, 0);
+
+	psci_enter_sleep(cpu, idx, false);
+
+	cluster_unprepare(cpu->parent, cpumask, idx, false, 0);
+	cpu_unprepare(cpu, idx, true);
+}
+
+#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+static int cpuidle_register_cpu(struct cpuidle_driver *drv,
+		struct cpumask *mask)
+{
+	struct cpuidle_device *device;
+	int cpu, ret;
+
+	if (!mask || !drv)
+		return -EINVAL;
+
+	drv->cpumask = mask;
+	ret = cpuidle_register_driver(drv);
+	if (ret) {
+		pr_err("Failed to register cpuidle driver %d\n", ret);
+		goto failed_driver_register;
+	}
+
+	for_each_cpu(cpu, mask) {
+		device = &per_cpu(cpuidle_dev, cpu);
+		device->cpu = cpu;
+
+		ret = cpuidle_register_device(device);
+		if (ret) {
+			pr_err("Failed to register cpuidle device for cpu:%u\n",
+					cpu);
+			goto failed_driver_register;
+		}
+	}
+	return ret;
+failed_driver_register:
+	for_each_cpu(cpu, mask)
+		cpuidle_unregister_driver(drv);
+	return ret;
+}
+#else
+static int cpuidle_register_cpu(struct cpuidle_driver *drv,
+		struct  cpumask *mask)
+{
+	return cpuidle_register(drv, NULL);
+}
+#endif
+
+static struct cpuidle_governor lpm_governor = {
+	.name =		"qcom",
+	.rating =	30,
+	.select =	lpm_cpuidle_select,
+};
+
+static int cluster_cpuidle_register(struct lpm_cluster *cl)
+{
+	int i = 0, ret = 0;
+	unsigned int cpu;
+	struct lpm_cluster *p = NULL;
+	struct lpm_cpu *lpm_cpu;
+
+	if (list_empty(&cl->cpu)) {
+		struct lpm_cluster *n;
+
+		list_for_each_entry(n, &cl->child, list) {
+			ret = cluster_cpuidle_register(n);
+			if (ret)
+				break;
+		}
+		return ret;
+	}
+
+	list_for_each_entry(lpm_cpu, &cl->cpu, list) {
+		lpm_cpu->drv = kcalloc(1, sizeof(*lpm_cpu->drv), GFP_KERNEL);
+		if (!lpm_cpu->drv)
+			return -ENOMEM;
+
+		lpm_cpu->drv->name = "msm_idle";
+
+		for (i = 0; i < lpm_cpu->nlevels; i++) {
+			struct cpuidle_state *st = &lpm_cpu->drv->states[i];
+			struct lpm_cpu_level *cpu_level = &lpm_cpu->levels[i];
+
+			snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
+			snprintf(st->desc, CPUIDLE_DESC_LEN, "%s",
+					cpu_level->name);
+			st->flags = 0;
+			st->exit_latency = cpu_level->pwr.exit_latency;
+			st->target_residency = 0;
+			st->enter = lpm_cpuidle_enter;
+			if (i == lpm_cpu->nlevels - 1)
+				st->enter_s2idle = lpm_cpuidle_s2idle;
+		}
+
+		lpm_cpu->drv->state_count = lpm_cpu->nlevels;
+		lpm_cpu->drv->safe_state_index = 0;
+		for_each_cpu(cpu, &lpm_cpu->related_cpus)
+			per_cpu(cpu_lpm, cpu) = lpm_cpu;
+
+		for_each_possible_cpu(cpu) {
+			if (cpu_online(cpu))
+				continue;
+			if (per_cpu(cpu_lpm, cpu))
+				p = per_cpu(cpu_lpm, cpu)->parent;
+			while (p) {
+				int j;
+
+				spin_lock(&p->sync_lock);
+				cpumask_set_cpu(cpu, &p->num_children_in_sync);
+				for (j = 0; j < p->nlevels; j++)
+					cpumask_copy(
+						&p->levels[j].num_cpu_votes,
+						&p->num_children_in_sync);
+				spin_unlock(&p->sync_lock);
+				p = p->parent;
+			}
+		}
+		ret = cpuidle_register_cpu(lpm_cpu->drv,
+					&lpm_cpu->related_cpus);
+
+		if (ret) {
+			kfree(lpm_cpu->drv);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * init_lpm - initializes the governor
+ */
+static int __init init_lpm(void)
+{
+	return cpuidle_register_governor(&lpm_governor);
+}
+
+postcore_initcall(init_lpm);
+
+static void register_cpu_lpm_stats(struct lpm_cpu *cpu,
+		struct lpm_cluster *parent)
+{
+	const char **level_name;
+	int i;
+
+	level_name = kcalloc(cpu->nlevels, sizeof(*level_name), GFP_KERNEL);
+
+	if (!level_name)
+		return;
+
+	for (i = 0; i < cpu->nlevels; i++)
+		level_name[i] = cpu->levels[i].name;
+
+	lpm_stats_config_level("cpu", level_name, cpu->nlevels,
+			parent->stats, &cpu->related_cpus);
+
+	kfree(level_name);
+}
+
+static void register_cluster_lpm_stats(struct lpm_cluster *cl,
+		struct lpm_cluster *parent)
+{
+	const char **level_name;
+	struct lpm_cluster *child;
+	struct lpm_cpu *cpu;
+	int i;
+
+	if (!cl)
+		return;
+
+	level_name = kcalloc(cl->nlevels, sizeof(*level_name), GFP_KERNEL);
+
+	if (!level_name)
+		return;
+
+	for (i = 0; i < cl->nlevels; i++)
+		level_name[i] = cl->levels[i].level_name;
+
+	cl->stats = lpm_stats_config_level(cl->cluster_name, level_name,
+			cl->nlevels, parent ? parent->stats : NULL, NULL);
+
+	kfree(level_name);
+
+	list_for_each_entry(cpu, &cl->cpu, list)
+		register_cpu_lpm_stats(cpu, cl);
+
+	if (!list_empty(&cl->cpu))
+		return;
+
+	list_for_each_entry(child, &cl->child, list)
+		register_cluster_lpm_stats(child, cl);
+}
+
+static int lpm_suspend_prepare(void)
+{
+	suspend_in_progress = true;
+	lpm_stats_suspend_enter();
+
+	return 0;
+}
+
+static void lpm_suspend_wake(void)
+{
+	suspend_in_progress = false;
+	lpm_stats_suspend_exit();
+}
+
+static int lpm_suspend_enter(suspend_state_t state)
+{
+	int cpu = raw_smp_processor_id();
+	struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, cpu);
+	struct lpm_cluster *cluster = lpm_cpu->parent;
+	const struct cpumask *cpumask = get_cpu_mask(cpu);
+	int idx;
+
+	for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
+		if (lpm_cpu_mode_allow(cpu, idx, false))
+			break;
+	}
+	if (idx < 0) {
+		pr_err("Failed suspend\n");
+		return 0;
+	}
+	cpu_prepare(lpm_cpu, idx, false);
+	cluster_prepare(cluster, cpumask, idx, false, 0);
+
+	psci_enter_sleep(lpm_cpu, idx, false);
+
+	cluster_unprepare(cluster, cpumask, idx, false, 0);
+	cpu_unprepare(lpm_cpu, idx, false);
+	return 0;
+}
+
+static const struct platform_suspend_ops lpm_suspend_ops = {
+	.enter = lpm_suspend_enter,
+	.valid = suspend_valid_only_mem,
+	.prepare_late = lpm_suspend_prepare,
+	.wake = lpm_suspend_wake,
+};
+
+static const struct platform_s2idle_ops lpm_s2idle_ops = {
+	.prepare = lpm_suspend_prepare,
+	.restore = lpm_suspend_wake,
+};
+
+static int lpm_probe(struct platform_device *pdev)
+{
+	int ret;
+	int size;
+	unsigned int cpu;
+	struct hrtimer *cpu_histtimer;
+	struct kobject *module_kobj = NULL;
+	struct md_region md_entry;
+
+	get_online_cpus();
+	lpm_root_node = lpm_of_parse_cluster(pdev);
+
+	if (IS_ERR_OR_NULL(lpm_root_node)) {
+		pr_err("Failed to probe low power modes\n");
+		put_online_cpus();
+		return lpm_root_node ? PTR_ERR(lpm_root_node) : -ENODEV;
+	}
+
+	if (print_parsed_dt)
+		cluster_dt_walkthrough(lpm_root_node);
+
+	/*
+	 * The hotplug notifier should be registered before the broadcast
+	 * timer is set up, to prevent a race where a broadcast timer is
+	 * never configured for a core.  This is a latent bug in the existing
+	 * code, but no issues have been observed, likely because lpm_levels
+	 * is initialized so late.
+	 */
+	suspend_set_ops(&lpm_suspend_ops);
+	s2idle_set_ops(&lpm_s2idle_ops);
+	hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	for_each_possible_cpu(cpu) {
+		cpu_histtimer = &per_cpu(histtimer, cpu);
+		hrtimer_init(cpu_histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	}
+
+	cluster_timer_init(lpm_root_node);
+
+	size = num_dbg_elements * sizeof(struct lpm_debug);
+	lpm_debug = dma_alloc_coherent(&pdev->dev, size,
+			&lpm_debug_phys, GFP_KERNEL);
+
+	register_cluster_lpm_stats(lpm_root_node, NULL);
+
+	ret = cluster_cpuidle_register(lpm_root_node);
+	put_online_cpus();
+	if (ret) {
+		pr_err("Failed to register with cpuidle framework\n");
+		goto failed;
+	}
+
+	ret = cpuhp_setup_state(CPUHP_AP_QCOM_SLEEP_STARTING,
+			"AP_QCOM_SLEEP_STARTING",
+			lpm_starting_cpu, lpm_dying_cpu);
+	if (ret)
+		goto failed;
+
+	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+	if (!module_kobj) {
+		pr_err("Cannot find kobject for module %s\n", KBUILD_MODNAME);
+		ret = -ENOENT;
+		goto failed;
+	}
+
+	ret = create_cluster_lvl_nodes(lpm_root_node, module_kobj);
+	if (ret) {
+		pr_err("Failed to create cluster level nodes\n");
+		goto failed;
+	}
+
+	/* Add lpm_debug to Minidump */
+	strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
+	md_entry.virt_addr = (uintptr_t)lpm_debug;
+	md_entry.phys_addr = lpm_debug_phys;
+	md_entry.size = size;
+	if (msm_minidump_add_region(&md_entry))
+		pr_info("Failed to add lpm_debug in Minidump\n");
+
+	return 0;
+failed:
+	free_cluster_node(lpm_root_node);
+	lpm_root_node = NULL;
+	return ret;
+}
+
+static const struct of_device_id lpm_mtch_tbl[] = {
+	{.compatible = "qcom,lpm-levels"},
+	{},
+};
+
+static struct platform_driver lpm_driver = {
+	.probe = lpm_probe,
+	.driver = {
+		.name = "lpm-levels",
+		.suppress_bind_attrs = true,
+		.of_match_table = lpm_mtch_tbl,
+	},
+};
+
+static int __init lpm_levels_module_init(void)
+{
+	int rc;
+
+#ifdef CONFIG_ARM
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		rc = arm_cpuidle_init(cpu);
+		if (rc) {
+			pr_err("CPU%d ARM CPUidle init failed (%d)\n", cpu, rc);
+			return rc;
+		}
+	}
+#endif
+
+	rc = platform_driver_register(&lpm_driver);
+	if (rc)
+		pr_err("Error registering %s rc=%d\n",
+		       lpm_driver.driver.name, rc);
+
+	return rc;
+}
+late_initcall(lpm_levels_module_init);
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
new file mode 100644
index 0000000..e1ec2a6
--- /dev/null
+++ b/drivers/cpuidle/lpm-levels.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <soc/qcom/pm.h>
+
+#define NR_LPM_LEVELS 8
+#define MAXSAMPLES 5
+#define CLUST_SMPL_INVLD_TIME 40000
+#define DEFAULT_PREMATURE_CNT 3
+#define DEFAULT_STDDEV 100
+#define DEFAULT_TIMER_ADD 100
+#define TIMER_ADD_LOW 100
+#define TIMER_ADD_HIGH 1500
+#define STDDEV_LOW 100
+#define STDDEV_HIGH 1000
+#define PREMATURE_CNT_LOW 1
+#define PREMATURE_CNT_HIGH 5
+
+struct power_params {
+	uint32_t entry_latency;		/* Entry latency */
+	uint32_t exit_latency;		/* Exit latency */
+	uint32_t min_residency;
+	uint32_t max_residency;
+};
+
+struct lpm_cpu_level {
+	const char *name;
+	bool use_bc_timer;
+	struct power_params pwr;
+	unsigned int psci_id;
+	bool is_reset;
+	int reset_level;
+};
+
+struct lpm_cpu {
+	struct list_head list;
+	struct cpumask related_cpus;
+	struct lpm_cpu_level levels[NR_LPM_LEVELS];
+	int nlevels;
+	unsigned int psci_mode_shift;
+	unsigned int psci_mode_mask;
+	uint32_t ref_stddev;
+	uint32_t ref_premature_cnt;
+	uint32_t tmr_add;
+	bool lpm_prediction;
+	struct cpuidle_driver *drv;
+	struct lpm_cluster *parent;
+};
+
+struct lpm_level_avail {
+	bool idle_enabled;
+	bool suspend_enabled;
+	uint32_t exit_latency;
+	struct kobject *kobj;
+	struct kobj_attribute idle_enabled_attr;
+	struct kobj_attribute suspend_enabled_attr;
+	struct kobj_attribute latency_attr;
+	void *data;
+	int idx;
+	bool cpu_node;
+};
+
+struct lpm_cluster_level {
+	const char *level_name;
+	int min_child_level;
+	struct cpumask num_cpu_votes;
+	struct power_params pwr;
+	bool notify_rpm;
+	bool sync_level;
+	struct lpm_level_avail available;
+	unsigned int psci_id;
+	bool is_reset;
+	int reset_level;
+};
+
+struct cluster_history {
+	uint32_t resi[MAXSAMPLES];
+	int mode[MAXSAMPLES];
+	int64_t stime[MAXSAMPLES];
+	uint32_t hptr;
+	uint32_t hinvalid;
+	uint32_t htmr_wkup;
+	uint64_t entry_time;
+	int entry_idx;
+	int nsamp;
+	int flag;
+};
+
+struct lpm_cluster {
+	struct list_head list;
+	struct list_head child;
+	const char *cluster_name;
+	unsigned long aff_level; /* Affinity level of the node */
+	struct lpm_cluster_level levels[NR_LPM_LEVELS];
+	int nlevels;
+	int min_child_level;
+	int default_level;
+	int last_level;
+	uint32_t tmr_add;
+	bool lpm_prediction;
+	struct list_head cpu;
+	spinlock_t sync_lock;
+	struct cpumask child_cpus;
+	struct cpumask num_children_in_sync;
+	struct lpm_cluster *parent;
+	struct lpm_stats *stats;
+	unsigned int psci_mode_shift;
+	unsigned int psci_mode_mask;
+	struct cluster_history history;
+	struct hrtimer histtimer;
+};
+
+struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
+void free_cluster_node(struct lpm_cluster *cluster);
+void cluster_dt_walkthrough(struct lpm_cluster *cluster);
+
+int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj);
+int lpm_cpu_mode_allow(unsigned int cpu,
+		unsigned int mode, bool from_idle);
+bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
+		unsigned int mode, bool from_idle);
+uint32_t *get_per_cpu_max_residency(int cpu);
+uint32_t *get_per_cpu_min_residency(int cpu);
+extern struct lpm_cluster *lpm_root_node;
+
+#if defined(CONFIG_SMP)
+DECLARE_PER_CPU(bool, pending_ipi);
+static inline bool is_IPI_pending(const struct cpumask *mask)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, mask) {
+		if (per_cpu(pending_ipi, cpu))
+			return true;
+	}
+	return false;
+}
+#else
+static inline bool is_IPI_pending(const struct cpumask *mask)
+{
+	return false;
+}
+#endif
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 699f6e5..e6e6d0e2 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -486,4 +486,73 @@
 	  clients to communicate over G-Link via device nodes.
 	  This enable the userspace clients to read and write to
 	  some glink packets channel.
+
+config QTI_SYSTEM_PM
+	bool
+
+config MSM_EVENT_TIMER
+	bool "Event timer"
+	help
+	  This option enables a module that manages a list of event timers
+	  that need to be monitored by the PM. This enables the PM code to
+	  monitor events that require the core to be awake and ready to
+	  handle the event.
+
+config MSM_PM
+	depends on PM
+	select MSM_IDLE_STATS if DEBUG_FS
+	select CPU_IDLE_MULTIPLE_DRIVERS
+	select QTI_SYSTEM_PM if QCOM_RPMH
+	bool "Qualcomm Technologies, Inc. (QTI) Power Management Drivers"
+	help
+	  Platform-specific power driver to manage cores and L2 low power
+	  modes. It interfaces with various system drivers and puts the
+	  cores into low power modes. It implements the OS-initiated scheme
+	  and determines the last CPU to call into PSCI for cluster low
+	  power modes.
+
+if MSM_PM
+menuconfig MSM_IDLE_STATS
+	bool "Collect idle statistics"
+	help
+	  Collect per-core low power mode idle statistics and
+	  export them via debugfs. Users can read this data to
+	  determine which low power modes the cores entered and
+	  how many times each mode was entered.
+
+if MSM_IDLE_STATS
+
+config MSM_IDLE_STATS_FIRST_BUCKET
+	int "First bucket time"
+	default 62500
+	help
+	  Upper time limit in nanoseconds of first bucket.
+
+config MSM_IDLE_STATS_BUCKET_SHIFT
+	int "Bucket shift"
+	default 2
+
+config MSM_IDLE_STATS_BUCKET_COUNT
+	int "Bucket count"
+	default 10
+
+config MSM_SUSPEND_STATS_FIRST_BUCKET
+	int "First bucket time for suspend"
+	default 1000000000
+	help
+	  Upper time limit in nanoseconds of first bucket of the
+	  histogram.  This is for collecting statistics on suspend.
+
+endif # MSM_IDLE_STATS
+endif # MSM_PM
+
+config QTI_RPM_STATS_LOG
+	bool "Qualcomm Technologies RPM Stats Driver"
+	depends on QCOM_RPMH
+	help
+	  This option enables a driver which reads RPM messages from a shared
+	  memory location. These messages provide statistical information about
+	  the low power modes that the RPM enters. The driver outputs the
+	  messages via a sysfs node.
+
 endmenu
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 0853636..307ca37 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -57,3 +57,8 @@
 obj-$(CONFIG_QSEE_IPC_IRQ) += qsee_ipc_irq.o
 obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
 obj-$(CONFIG_QPNP_PBS) += qpnp-pbs.o
+obj-$(CONFIG_QTI_SYSTEM_PM) += system_pm.o
+obj-$(CONFIG_MSM_EVENT_TIMER) += event_timer.o
+obj-$(CONFIG_MSM_IDLE_STATS)	+= lpm-stats.o
+obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpmh_master_stat.o
+obj-$(CONFIG_QTI_RPM_STATS_LOG) += rpm_stats.o
diff --git a/drivers/soc/qcom/event_timer.c b/drivers/soc/qcom/event_timer.c
new file mode 100644
index 0000000..250c2e9
--- /dev/null
+++ b/drivers/soc/qcom/event_timer.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <soc/qcom/event_timer.h>
+
+/**
+ * struct event_timer_info - basic event timer structure
+ * @node: timerqueue node to track time ordered data structure
+ *        of event timers
+ * @notify: irq affinity notifier.
+ * @function: callback function for the event timer.
+ * @data: callback data for the event timer.
+ * @irq: irq number for which the event timer is created.
+ * @cpu: cpu the event timer is associated with.
+ */
+struct event_timer_info {
+	struct timerqueue_node node;
+	struct irq_affinity_notify notify;
+	void (*function)(void *v);
+	void *data;
+	int irq;
+	int cpu;
+};
+
+struct hrtimer_info {
+	struct hrtimer event_hrtimer;
+	bool timer_initialized;
+};
+
+static DEFINE_PER_CPU(struct hrtimer_info, per_cpu_hrtimer);
+
+static DEFINE_PER_CPU(struct timerqueue_head, timer_head) = {
+	.head = RB_ROOT,
+	.next = NULL,
+};
+
+static DEFINE_SPINLOCK(event_timer_lock);
+static DEFINE_SPINLOCK(event_setup_lock);
+
+static void create_timer_smp(void *data);
+static void setup_event_hrtimer(struct event_timer_info *event);
+static enum hrtimer_restart event_hrtimer_cb(struct hrtimer *hrtimer);
+static void irq_affinity_change_notifier(struct irq_affinity_notify *notify,
+						const cpumask_t *new_cpu_mask);
+static void irq_affinity_release(struct kref *ref);
+
+static int msm_event_debug_mask;
+module_param_named(debug_mask, msm_event_debug_mask, int, 0664);
+
+enum {
+	MSM_EVENT_TIMER_DEBUG = 1U << 0,
+};
+
+/**
+ * add_event_timer() : Add a wakeup event. Intended to be called
+ *                     by clients once. Returns a handle to be used
+ *                     for future transactions.
+ * @irq: irq number associated with the event.
+ * @function: callback function invoked when the event timer expires.
+ * @data: callback data provided by the client.
+ */
+struct event_timer_info *add_event_timer(uint32_t irq,
+				void (*function)(void *), void *data)
+{
+	struct event_timer_info *event_info =
+			kzalloc(sizeof(struct event_timer_info), GFP_KERNEL);
+
+	if (!event_info)
+		return NULL;
+
+	event_info->function = function;
+	event_info->data = data;
+
+	if (irq) {
+		struct irq_desc *desc = irq_to_desc(irq);
+		struct cpumask *mask = desc->irq_common_data.affinity;
+
+		get_online_cpus();
+		event_info->cpu = cpumask_any_and(mask, cpu_online_mask);
+		if (event_info->cpu >= nr_cpu_ids)
+			event_info->cpu = cpumask_first(cpu_online_mask);
+
+		event_info->notify.notify = irq_affinity_change_notifier;
+		event_info->notify.release = irq_affinity_release;
+		irq_set_affinity_notifier(irq, &event_info->notify);
+		put_online_cpus();
+	}
+
+	/* Init rb node and hr timer */
+	timerqueue_init(&event_info->node);
+	pr_debug("New Event Added. Event %pK(on cpu%d). irq %d.\n",
+					event_info, event_info->cpu, irq);
+
+	return event_info;
+}
+EXPORT_SYMBOL(add_event_timer);
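+
+/*
+ * Illustrative usage sketch (hypothetical client code, not exercised by
+ * this driver): a client that owns a wakeup irq would typically create one
+ * handle at init time and then re-arm it before each expected wakeup.  The
+ * irq number, callback and expiry below are placeholders.
+ *
+ *	static void my_wakeup_cb(void *data)
+ *	{
+ *		// invoked from the event hrtimer callback on the event's cpu
+ *	}
+ *
+ *	struct event_timer_info *ev;
+ *
+ *	ev = add_event_timer(my_irq, my_wakeup_cb, my_data);
+ *	if (ev)
+ *		activate_event_timer(ev, ktime_add_ms(ktime_get(), 100));
+ *	...
+ *	destroy_event_timer(ev);
+ */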
+
+/**
+ * is_event_next(): Helper function to check if the event is the next
+ *                  expiring event
+ * @event : handle to the event to be checked.
+ */
+static bool is_event_next(struct event_timer_info *event)
+{
+	struct event_timer_info *next_event;
+	struct timerqueue_node *next;
+	bool ret = false;
+
+	next = timerqueue_getnext(&per_cpu(timer_head, event->cpu));
+	if (!next)
+		goto exit_is_next_event;
+
+	next_event = container_of(next, struct event_timer_info, node);
+	if (!next_event)
+		goto exit_is_next_event;
+
+	if (next_event == event)
+		ret = true;
+
+exit_is_next_event:
+	return ret;
+}
+
+/**
+ * is_event_active(): Helper function to check if the timer for a given event
+ *                    has been started.
+ * @event : handle to the event to be checked.
+ */
+static bool is_event_active(struct event_timer_info *event)
+{
+	struct timerqueue_node *next;
+	struct event_timer_info *next_event;
+	bool ret = false;
+
+	for (next = timerqueue_getnext(&per_cpu(timer_head, event->cpu)); next;
+			next = timerqueue_iterate_next(next)) {
+		next_event = container_of(next, struct event_timer_info, node);
+
+		if (event == next_event) {
+			ret = true;
+			break;
+		}
+	}
+	return ret;
+}
+
+/**
+ * create_hrtimer(): Helper function to setup hrtimer.
+ */
+static void create_hrtimer(struct event_timer_info *event)
+
+{
+	bool timer_initialized = per_cpu(per_cpu_hrtimer.timer_initialized,
+								event->cpu);
+	struct hrtimer *event_hrtimer = &per_cpu(per_cpu_hrtimer.event_hrtimer,
+								event->cpu);
+
+	if (!timer_initialized) {
+		hrtimer_init(event_hrtimer, CLOCK_MONOTONIC,
+						HRTIMER_MODE_ABS_PINNED);
+		per_cpu(per_cpu_hrtimer.timer_initialized, event->cpu) = true;
+	}
+
+	event_hrtimer->function = event_hrtimer_cb;
+	hrtimer_start(event_hrtimer, event->node.expires,
+					HRTIMER_MODE_ABS_PINNED);
+}
+
+/**
+ * event_hrtimer_cb() : Callback function for hr timer.
+ *                      Make the client CB from here and remove the event
+ *                      from the time ordered queue.
+ */
+static enum hrtimer_restart event_hrtimer_cb(struct hrtimer *hrtimer)
+{
+	struct event_timer_info *event;
+	struct timerqueue_node *next;
+	unsigned long flags;
+	int cpu;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	cpu = smp_processor_id();
+	next = timerqueue_getnext(&per_cpu(timer_head, cpu));
+
+	while (next && (ktime_to_ns(next->expires)
+		<= ktime_to_ns(hrtimer->node.expires))) {
+		event = container_of(next, struct event_timer_info, node);
+		if (!event)
+			goto hrtimer_cb_exit;
+
+		WARN_ON_ONCE(event->cpu != cpu);
+
+		if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+			pr_debug("Deleting event %pK @ %lu(on cpu%d)\n", event,
+				(unsigned long)ktime_to_ns(next->expires), cpu);
+
+		timerqueue_del(&per_cpu(timer_head, cpu), &event->node);
+
+		if (event->function)
+			event->function(event->data);
+
+		next = timerqueue_getnext(&per_cpu(timer_head, cpu));
+	}
+
+	if (next) {
+		event = container_of(next, struct event_timer_info, node);
+		create_hrtimer(event);
+	}
+hrtimer_cb_exit:
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * create_timer_smp(): Helper function used for setting up the timer on the
+ *                     target CPU.
+ */
+static void create_timer_smp(void *data)
+{
+	unsigned long flags;
+	struct event_timer_info *event =
+		(struct event_timer_info *)data;
+	struct timerqueue_node *next;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+
+	if (is_event_active(event))
+		timerqueue_del(&per_cpu(timer_head, event->cpu), &event->node);
+
+	next = timerqueue_getnext(&per_cpu(timer_head, event->cpu));
+	timerqueue_add(&per_cpu(timer_head, event->cpu), &event->node);
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_debug("Adding Event %pK(on cpu%d) for %lu\n", event,
+		event->cpu,
+		(unsigned long)ktime_to_ns(event->node.expires));
+
+	if (!next || (next && (ktime_to_ns(event->node.expires) <
+						ktime_to_ns(next->expires)))) {
+		if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+			pr_debug("Setting timer for %lu(on cpu%d)\n",
+			(unsigned long)ktime_to_ns(event->node.expires),
+			event->cpu);
+
+		create_hrtimer(event);
+	}
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+}
+
+/**
+ * setup_event_hrtimer() : Helper function to set up the event timer on
+ *                         the event's cpu via an SMP call.
+ * @event: event handle causing the wakeup.
+ */
+static void setup_event_hrtimer(struct event_timer_info *event)
+{
+	smp_call_function_single(event->cpu, create_timer_smp, event, 1);
+}
+
+static void irq_affinity_release(struct kref *ref)
+{
+	struct event_timer_info *event;
+	struct irq_affinity_notify *notify =
+			container_of(ref, struct irq_affinity_notify, kref);
+
+	event = container_of(notify, struct event_timer_info, notify);
+	pr_debug("event = %pK\n", event);
+}
+
+static void irq_affinity_change_notifier(struct irq_affinity_notify *notify,
+						const cpumask_t *mask_val)
+{
+	struct event_timer_info *event;
+	unsigned long flags;
+	unsigned int irq;
+	int old_cpu = -EINVAL, new_cpu = -EINVAL;
+	bool next_event = false;
+
+	event = container_of(notify, struct event_timer_info, notify);
+	irq = notify->irq;
+
+	if (!event)
+		return;
+
+	/*
+	 * This logic is in line with irq-gic.c for finding
+	 * the next affinity CPU.
+	 */
+	new_cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	if (new_cpu >= nr_cpu_ids)
+		return;
+
+	old_cpu = event->cpu;
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_debug("irq %d, event %pK, old_cpu(%d)->new_cpu(%d).\n",
+						irq, event, old_cpu, new_cpu);
+
+	/* No change in IRQ affinity */
+	if (old_cpu == new_cpu)
+		return;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+
+	/*
+	 * If the event is not active, or if it is the next event and its
+	 * timer is already running the callback, just update the cpu and
+	 * return.
+	 */
+	if (!is_event_active(event) ||
+		(is_event_next(event) &&
+		(hrtimer_try_to_cancel(&per_cpu(per_cpu_hrtimer.event_hrtimer,
+						old_cpu)) < 0))) {
+		event->cpu = new_cpu;
+		spin_unlock_irqrestore(&event_timer_lock, flags);
+		if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+			pr_debug("Event:%pK is not active or in callback\n",
+					event);
+		return;
+	}
+
+	/* Note whether this event is the next one to expire */
+	if (is_event_next(event))
+		next_event = true;
+
+	event->cpu = new_cpu;
+
+	/*
+	 * We are here either because the hrtimer was active or the event is
+	 * not the next one.  Delete the event from the timer queue either way.
+	 */
+	timerqueue_del(&per_cpu(timer_head, old_cpu), &event->node);
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_debug("Event:%pK is in the list\n", event);
+
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+
+	/*
+	 * Migrating the event timer to the new CPU is taken care of
+	 * automatically, since event->cpu has already been updated with
+	 * the new CPU.
+	 *
+	 * Typical cases are
+	 *
+	 * 1)
+	 *		C0			C1
+	 *		|			^
+	 *	-----------------		|
+	 *	|	|	|		|
+	 *	E1	E2	E3		|
+	 *		|(migrating)		|
+	 *		-------------------------
+	 *
+	 * 2)
+	 *		C0			C1
+	 *		|			^
+	 *	----------------		|
+	 *	|	|	|		|
+	 *	E1	E2	E3		|
+	 *	|(migrating)			|
+	 *	---------------------------------
+	 *
+	 * Here, after moving E1 to C1, E2 needs to be started
+	 * on C0.
+	 */
+	spin_lock(&event_setup_lock);
+	/* Set up the event timer on the new cpu */
+	setup_event_hrtimer(event);
+
+	/* Set up the next event on the old cpu */
+	if (next_event) {
+		struct timerqueue_node *next;
+
+		next = timerqueue_getnext(&per_cpu(timer_head, old_cpu));
+		if (next) {
+			event = container_of(next,
+					struct event_timer_info, node);
+			setup_event_hrtimer(event);
+		}
+	}
+	spin_unlock(&event_setup_lock);
+}
+
+/**
+ * activate_event_timer() : Set the expiration time for an event in absolute
+ *                          ktime. This is a one-shot event timer; clients
+ *                          should call this again to set another expiration.
+ * @event: event handle.
+ * @event_time: event time in absolute ktime.
+ */
+void activate_event_timer(struct event_timer_info *event, ktime_t event_time)
+{
+	if (!event)
+		return;
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_debug("Adding event %pK timer @ %lu(on cpu%d)\n", event,
+				(unsigned long)ktime_to_us(event_time),
+				event->cpu);
+
+	spin_lock(&event_setup_lock);
+	event->node.expires = event_time;
+	/* Start hrtimer and add event to rb tree */
+	setup_event_hrtimer(event);
+	spin_unlock(&event_setup_lock);
+}
+EXPORT_SYMBOL(activate_event_timer);
+
+/**
+ * deactivate_event_timer() : Deactivate an event timer, this removes the event
+ *				from the time ordered queue of event timers.
+ * @event: event handle.
+ */
+void deactivate_event_timer(struct event_timer_info *event)
+{
+	unsigned long flags;
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_debug("Deactivate timer\n");
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	if (is_event_active(event)) {
+		if (is_event_next(event))
+			hrtimer_try_to_cancel(&per_cpu(
+				per_cpu_hrtimer.event_hrtimer, event->cpu));
+
+		timerqueue_del(&per_cpu(timer_head, event->cpu), &event->node);
+	}
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+}
+
+/**
+ * destroy_event_timer() : Free the event info data structure allocated during
+ *                         add_event_timer().
+ * @event: event handle.
+ */
+void destroy_event_timer(struct event_timer_info *event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	if (is_event_active(event)) {
+		if (is_event_next(event))
+			hrtimer_try_to_cancel(&per_cpu(
+				per_cpu_hrtimer.event_hrtimer, event->cpu));
+
+		timerqueue_del(&per_cpu(timer_head, event->cpu), &event->node);
+	}
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+	kfree(event);
+}
+EXPORT_SYMBOL(destroy_event_timer);
+
+/**
+ * get_next_event_time() - Get the next wakeup event. Returns
+ *                         a ktime value of the next expiring event.
+ */
+ktime_t get_next_event_time(int cpu)
+{
+	unsigned long flags;
+	struct timerqueue_node *next;
+	struct event_timer_info *event;
+	ktime_t next_event = ns_to_ktime(0);
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	next = timerqueue_getnext(&per_cpu(timer_head, cpu));
+	event = container_of(next, struct event_timer_info, node);
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+
+	if (!next || event->cpu != cpu)
+		return next_event;
+
+	next_event = hrtimer_get_remaining(
+				&per_cpu(per_cpu_hrtimer.event_hrtimer, cpu));
+
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_debug("Next Event %lu(on cpu%d)\n",
+			(unsigned long)ktime_to_us(next_event), cpu);
+
+	return next_event;
+}
diff --git a/drivers/soc/qcom/lpm-stats.c b/drivers/soc/qcom/lpm-stats.c
new file mode 100644
index 0000000..4591798f
--- /dev/null
+++ b/drivers/soc/qcom/lpm-stats.c
@@ -0,0 +1,878 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/suspend.h>
+#include <soc/qcom/pm.h>
+#include <soc/qcom/lpm-stats.h>
+
+#define MAX_STR_LEN 256
+#define MAX_TIME_LEN 20
+static const char *lpm_stats_reset = "reset";
+static const char *lpm_stats_suspend = "suspend";
+
+struct lpm_sleep_time {
+	struct kobj_attribute ts_attr;
+	unsigned int cpu;
+};
+
+struct level_stats {
+	const char *name;
+	struct lpm_stats *owner;
+	int64_t first_bucket_time;
+	int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+	int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+	int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+	int success_count;
+	int failed_count;
+	uint64_t total_time;
+	uint64_t enter_time;
+};
+
+static struct level_stats suspend_time_stats;
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct lpm_stats, cpu_stats);
+
+static uint64_t get_total_sleep_time(unsigned int cpu_id)
+{
+	struct lpm_stats *stats = &per_cpu(cpu_stats, cpu_id);
+	int i;
+	uint64_t ret = 0;
+
+	for (i = 0; i < stats->num_levels; i++)
+		ret += stats->time_stats[i].total_time;
+
+	return ret;
+}
+
+static void update_level_stats(struct level_stats *stats, uint64_t t,
+				bool success)
+{
+	uint64_t bt;
+	int i;
+
+	if (!success) {
+		stats->failed_count++;
+		return;
+	}
+
+	stats->success_count++;
+	stats->total_time += t;
+	bt = t;
+	do_div(bt, stats->first_bucket_time);
+
+	if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
+			(CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
+		i = DIV_ROUND_UP(fls((uint32_t)bt),
+			CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
+	else
+		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+
+	if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
+		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+
+	stats->bucket[i]++;
+
+	if (t < stats->min_time[i] || !stats->max_time[i])
+		stats->min_time[i] = t;
+	if (t > stats->max_time[i])
+		stats->max_time[i] = t;
+}
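+
+/*
+ * Worked example of the bucket math above, assuming the Kconfig defaults
+ * added by this patch (first bucket 62500 ns, bucket shift 2, 10 buckets):
+ * a successful 500 us sleep gives bt = 500000 / 62500 = 8, fls(8) = 4, so
+ * i = DIV_ROUND_UP(4, 2) = 2 and bucket[2] is incremented.  Each subsequent
+ * bucket boundary is (1 << shift), i.e. 4x, the previous one.
+ */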
+
+static void level_stats_print(struct seq_file *m, struct level_stats *stats)
+{
+	int i = 0;
+	int64_t bucket_time = 0;
+	char seqs[MAX_STR_LEN] = {0};
+	uint64_t s = stats->total_time;
+	uint32_t ns = do_div(s, NSEC_PER_SEC);
+
+	snprintf(seqs, MAX_STR_LEN,
+		"[%s] %s:\n"
+		"  success count: %7d\n"
+		"  total success time: %lld.%09u\n",
+		stats->owner->name,
+		stats->name,
+		stats->success_count,
+		s, ns);
+	seq_puts(m, seqs);
+
+	if (stats->failed_count) {
+		snprintf(seqs, MAX_STR_LEN, "  failed count: %7d\n",
+			stats->failed_count);
+		seq_puts(m, seqs);
+	}
+
+	bucket_time = stats->first_bucket_time;
+	for (i = 0;
+		i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+		i++) {
+		s = bucket_time;
+		ns = do_div(s, NSEC_PER_SEC);
+		snprintf(seqs, MAX_STR_LEN,
+			"\t<%6lld.%09u: %7d (%lld-%lld)\n",
+			s, ns, stats->bucket[i],
+				stats->min_time[i],
+				stats->max_time[i]);
+		seq_puts(m, seqs);
+		bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
+	}
+	snprintf(seqs, MAX_STR_LEN,
+		"\t>=%5lld.%09u:%8d (%lld-%lld)\n",
+		s, ns, stats->bucket[i],
+		stats->min_time[i],
+		stats->max_time[i]);
+	seq_puts(m, seqs);
+}
+
+static int level_stats_file_show(struct seq_file *m, void *v)
+{
+	struct level_stats *stats = NULL;
+
+	if (!m->private)
+		return -EINVAL;
+
+	stats = (struct level_stats *) m->private;
+
+	level_stats_print(m, stats);
+
+	return 0;
+}
+
+static int level_stats_file_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, level_stats_file_show, inode->i_private);
+}
+
+static void level_stats_print_all(struct seq_file *m, struct lpm_stats *stats)
+{
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+	int i = 0;
+
+	for (i = 0; i < stats->num_levels; i++)
+		level_stats_print(m, &stats->time_stats[i]);
+
+	if (list_empty(&stats->child))
+		return;
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		level_stats_print_all(m, pos);
+	}
+}
+
+static void level_stats_reset(struct level_stats *stats)
+{
+	memset(stats->bucket, 0, sizeof(stats->bucket));
+	memset(stats->min_time, 0, sizeof(stats->min_time));
+	memset(stats->max_time, 0, sizeof(stats->max_time));
+	stats->success_count = 0;
+	stats->failed_count = 0;
+	stats->total_time = 0;
+}
+
+static void level_stats_reset_all(struct lpm_stats *stats)
+{
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+	int i = 0;
+
+	for (i = 0; i < stats->num_levels; i++)
+		level_stats_reset(&stats->time_stats[i]);
+
+	if (list_empty(&stats->child))
+		return;
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		level_stats_reset_all(pos);
+	}
+}
+
+static int lpm_stats_file_show(struct seq_file *m, void *v)
+{
+	struct lpm_stats *stats = (struct lpm_stats *)m->private;
+
+	if (!m->private) {
+		pr_err("%s: Invalid pdata, Cannot print stats\n", __func__);
+		return -EINVAL;
+	}
+
+	level_stats_print_all(m, stats);
+	level_stats_print(m, &suspend_time_stats);
+
+	return 0;
+}
+
+static int lpm_stats_file_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, lpm_stats_file_show, inode->i_private);
+}
+
+static ssize_t level_stats_file_write(struct file *file,
+	const char __user *buffer, size_t count, loff_t *off)
+{
+	char buf[MAX_STR_LEN] = {0};
+	struct inode *in = file->f_inode;
+	struct level_stats *stats = (struct level_stats *)in->i_private;
+	size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN);
+
+	if (!stats)
+		return -EINVAL;
+
+	if (count != len+1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	if (strcmp(buf, lpm_stats_reset))
+		return -EINVAL;
+
+	level_stats_reset(stats);
+
+	return count;
+}
+
+static void reset_cpu_stats(void *info)
+{
+	struct lpm_stats *stats = &(*this_cpu_ptr(&(cpu_stats)));
+	int i;
+
+	for (i = 0; i < stats->num_levels; i++)
+		level_stats_reset(&stats->time_stats[i]);
+}
+
+static ssize_t lpm_stats_file_write(struct file *file,
+	const char __user *buffer, size_t count, loff_t *off)
+{
+	char buf[MAX_STR_LEN] = {0};
+	struct inode *in = file->f_inode;
+	struct lpm_stats *stats = (struct lpm_stats *)in->i_private;
+	size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN);
+
+	if (!stats)
+		return -EINVAL;
+
+	if (count != len+1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	if (strcmp(buf, lpm_stats_reset))
+		return -EINVAL;
+
+	level_stats_reset_all(stats);
+	/*
+	 * Wake up each CPU and reset its stats on that CPU itself, so that
+	 * the timestamps used for accounting are more accurate.
+	 */
+	on_each_cpu(reset_cpu_stats, NULL, 1);
+
+	return count;
+}
+
+static int lifo_stats_file_show(struct seq_file *m, void *v)
+{
+	struct lpm_stats *stats = NULL;
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+	char seqs[MAX_STR_LEN] = {0};
+
+	if (!m->private)
+		return -EINVAL;
+
+	stats = (struct lpm_stats *)m->private;
+
+	if (list_empty(&stats->child)) {
+		pr_err("%s: ERROR: Lifo level with no children.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		snprintf(seqs, MAX_STR_LEN,
+			"%s:\n"
+			"\tLast-In:%u\n"
+			"\tFirst-Out:%u\n",
+			pos->name,
+			pos->lifo.last_in,
+			pos->lifo.first_out);
+		seq_puts(m, seqs);
+	}
+	return 0;
+}
+
+static int lifo_stats_file_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, lifo_stats_file_show, inode->i_private);
+}
+
+static void lifo_stats_reset_all(struct lpm_stats *stats)
+{
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		pos->lifo.last_in = 0;
+		pos->lifo.first_out = 0;
+		if (!list_empty(&pos->child))
+			lifo_stats_reset_all(pos);
+	}
+}
+
+static ssize_t lifo_stats_file_write(struct file *file,
+	const char __user *buffer, size_t count, loff_t *off)
+{
+	char buf[MAX_STR_LEN] = {0};
+	struct inode *in = file->f_inode;
+	struct lpm_stats *stats = (struct lpm_stats *)in->i_private;
+	size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN);
+
+	if (!stats)
+		return -EINVAL;
+
+	if (count != len+1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	if (strcmp(buf, lpm_stats_reset))
+		return -EINVAL;
+
+	lifo_stats_reset_all(stats);
+
+	return count;
+}
+
+static const struct file_operations level_stats_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = level_stats_file_open,
+	.read	  = seq_read,
+	.release  = single_release,
+	.llseek   = no_llseek,
+	.write	  = level_stats_file_write,
+};
+
+static const struct file_operations lpm_stats_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = lpm_stats_file_open,
+	.read	  = seq_read,
+	.release  = single_release,
+	.llseek   = no_llseek,
+	.write	  = lpm_stats_file_write,
+};
+
+static const struct file_operations lifo_stats_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = lifo_stats_file_open,
+	.read	  = seq_read,
+	.release  = single_release,
+	.llseek   = no_llseek,
+	.write	  = lifo_stats_file_write,
+};
+
+static void update_last_in_stats(struct lpm_stats *stats)
+{
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+
+	if (list_empty(&stats->child))
+		return;
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		if (cpumask_test_cpu(smp_processor_id(), &pos->mask)) {
+			pos->lifo.last_in++;
+			return;
+		}
+	}
+	WARN(1, "Should not reach here\n");
+}
+
+static void update_first_out_stats(struct lpm_stats *stats)
+{
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+
+	if (list_empty(&stats->child))
+		return;
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		if (cpumask_test_cpu(smp_processor_id(), &pos->mask)) {
+			pos->lifo.first_out++;
+			return;
+		}
+	}
+	WARN(1, "Should not reach here\n");
+}
+
+static inline void update_exit_stats(struct lpm_stats *stats, uint32_t index,
+					bool success)
+{
+	uint64_t exit_time = 0;
+
+	/* Update time stats only when exit is preceded by enter */
+	if (stats->sleep_time < 0)
+		success = false;
+	else
+		exit_time = stats->sleep_time;
+	update_level_stats(&stats->time_stats[index], exit_time,
+					success);
+}
+
+static int config_level(const char *name, const char **levels,
+	int num_levels, struct lpm_stats *parent, struct lpm_stats *stats)
+{
+	int i = 0;
+	struct dentry *directory = NULL;
+	const char *rootname = "lpm_stats";
+	const char *dirname = rootname;
+
+	strlcpy(stats->name, name, MAX_STR_LEN);
+	stats->num_levels = num_levels;
+	stats->parent = parent;
+	INIT_LIST_HEAD(&stats->sibling);
+	INIT_LIST_HEAD(&stats->child);
+
+	stats->time_stats = kcalloc(num_levels, sizeof(struct level_stats),
+					GFP_KERNEL);
+	if (!stats->time_stats)
+		return -ENOMEM;
+
+	if (parent) {
+		list_add_tail(&stats->sibling, &parent->child);
+		directory = parent->directory;
+		dirname = name;
+	}
+
+	stats->directory = debugfs_create_dir(dirname, directory);
+	if (!stats->directory) {
+		pr_err("%s: Unable to create %s debugfs directory\n",
+			__func__, dirname);
+		kfree(stats->time_stats);
+		return -EPERM;
+	}
+
+	for (i = 0; i < num_levels; i++) {
+		stats->time_stats[i].name = levels[i];
+		stats->time_stats[i].owner = stats;
+		stats->time_stats[i].first_bucket_time =
+			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+		stats->time_stats[i].enter_time = 0;
+
+		if (!debugfs_create_file(stats->time_stats[i].name, 0444,
+			stats->directory, (void *)&stats->time_stats[i],
+			&level_stats_fops)) {
+			pr_err("%s: Unable to create %s %s level-stats file\n",
+				__func__, stats->name,
+				stats->time_stats[i].name);
+			kfree(stats->time_stats);
+			return -EPERM;
+		}
+	}
+
+	if (!debugfs_create_file("stats", 0444, stats->directory,
+		(void *)stats, &lpm_stats_fops)) {
+		pr_err("%s: Unable to create %s's overall 'stats' file\n",
+			__func__, stats->name);
+		kfree(stats->time_stats);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static ssize_t total_sleep_time_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	struct lpm_sleep_time *cpu_sleep_time = container_of(attr,
+			struct lpm_sleep_time, ts_attr);
+	unsigned int cpu = cpu_sleep_time->cpu;
+	uint64_t total_time = get_total_sleep_time(cpu);
+	/* do_div() must run before snprintf() evaluates total_time */
+	uint32_t remainder = do_div(total_time, NSEC_PER_SEC);
+
+	return snprintf(buf, MAX_TIME_LEN, "%llu.%09u\n", total_time,
+			remainder);
+}
+
+static struct kobject *local_module_kobject(void)
+{
+	struct kobject *kobj;
+
+	kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+
+	if (!kobj) {
+		int err;
+		struct module_kobject *mk;
+
+		mk = kzalloc(sizeof(*mk), GFP_KERNEL);
+		if (!mk)
+			return ERR_PTR(-ENOMEM);
+
+		mk->mod = THIS_MODULE;
+		mk->kobj.kset = module_kset;
+
+		err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL,
+				"%s", KBUILD_MODNAME);
+
+		if (err) {
+			kobject_put(&mk->kobj);
+			kfree(mk);
+			pr_err("%s: cannot create kobject for %s\n",
+					__func__, KBUILD_MODNAME);
+			return ERR_PTR(err);
+		}
+
+		kobject_get(&mk->kobj);
+		kobj = &mk->kobj;
+	}
+
+	return kobj;
+}
+
+static int create_sysfs_node(unsigned int cpu, struct lpm_stats *stats)
+{
+	struct kobject *cpu_kobj = NULL;
+	struct lpm_sleep_time *ts = NULL;
+	struct kobject *stats_kobj;
+	char cpu_name[] = "cpuXX";
+	int ret = -ENOMEM;
+
+	stats_kobj = local_module_kobject();
+
+	if (IS_ERR_OR_NULL(stats_kobj))
+		return PTR_ERR(stats_kobj);
+
+	snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
+	cpu_kobj = kobject_create_and_add(cpu_name, stats_kobj);
+	if (!cpu_kobj)
+		return -ENOMEM;
+
+	ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+	if (!ts)
+		goto failed;
+
+	sysfs_attr_init(&ts->ts_attr.attr);
+	ts->ts_attr.attr.name = "total_sleep_time_secs";
+	ts->ts_attr.attr.mode = 0444;
+	ts->ts_attr.show = total_sleep_time_show;
+	ts->ts_attr.store = NULL;
+	ts->cpu = cpu;
+
+	ret = sysfs_create_file(cpu_kobj, &ts->ts_attr.attr);
+	if (ret)
+		goto failed;
+
+	return 0;
+
+failed:
+	kfree(ts);
+	kobject_put(cpu_kobj);
+	return ret;
+}
+
+static struct lpm_stats *config_cpu_level(const char *name,
+	const char **levels, int num_levels, struct lpm_stats *parent,
+	struct cpumask *mask)
+{
+	int cpu = 0;
+	struct lpm_stats *pstats = NULL;
+	struct lpm_stats *stats = NULL;
+
+	for (pstats = parent; pstats; pstats = pstats->parent)
+		cpumask_or(&pstats->mask, &pstats->mask, mask);
+
+	for_each_cpu(cpu, mask) {
+		int ret = 0;
+		char cpu_name[MAX_STR_LEN] = {0};
+
+		stats = &per_cpu(cpu_stats, cpu);
+		snprintf(cpu_name, MAX_STR_LEN, "%s%d", name, cpu);
+		cpumask_set_cpu(cpu, &stats->mask);
+
+		stats->is_cpu = true;
+
+		ret = config_level(cpu_name, levels, num_levels, parent,
+					stats);
+		if (ret) {
+			pr_err("%s: Unable to create %s stats\n",
+				__func__, cpu_name);
+			return ERR_PTR(ret);
+		}
+
+		ret = create_sysfs_node(cpu, stats);
+
+		if (ret) {
+			pr_err("Could not create the sysfs node\n");
+			return ERR_PTR(ret);
+		}
+	}
+
+	return stats;
+}
+
+static void config_suspend_level(struct lpm_stats *stats)
+{
+	suspend_time_stats.name = lpm_stats_suspend;
+	suspend_time_stats.owner = stats;
+	suspend_time_stats.first_bucket_time =
+			CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
+	suspend_time_stats.enter_time = 0;
+	suspend_time_stats.success_count = 0;
+	suspend_time_stats.failed_count = 0;
+
+	if (!debugfs_create_file(suspend_time_stats.name, 0444,
+		stats->directory, (void *)&suspend_time_stats,
+		&level_stats_fops))
+		pr_err("%s: Unable to create %s Suspend stats file\n",
+			__func__, stats->name);
+}
+
+static struct lpm_stats *config_cluster_level(const char *name,
+	const char **levels, int num_levels, struct lpm_stats *parent)
+{
+	struct lpm_stats *stats = NULL;
+	int ret = 0;
+
+	stats = kzalloc(sizeof(struct lpm_stats), GFP_KERNEL);
+	if (!stats)
+		return ERR_PTR(-ENOMEM);
+
+	stats->is_cpu = false;
+
+	ret = config_level(name, levels, num_levels, parent, stats);
+	if (ret) {
+		pr_err("%s: Unable to create %s stats\n", __func__,
+			name);
+		kfree(stats);
+		return ERR_PTR(ret);
+	}
+
+	if (!debugfs_create_file("lifo", 0444, stats->directory,
+		(void *)stats, &lifo_stats_fops)) {
+		pr_err("%s: Unable to create %s lifo stats file\n",
+			__func__, stats->name);
+		kfree(stats);
+		return ERR_PTR(-EPERM);
+	}
+
+	if (!parent)
+		config_suspend_level(stats);
+
+	return stats;
+}
+
+static void cleanup_stats(struct lpm_stats *stats)
+{
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+	struct lpm_stats *n = NULL;
+
+	centry = &stats->child;
+	list_for_each_entry_safe_reverse(pos, n, centry, sibling) {
+		if (!list_empty(&pos->child)) {
+			cleanup_stats(pos);
+			continue;
+		}
+
+		list_del_init(&pos->child);
+
+		kfree(pos->time_stats);
+		if (!pos->is_cpu)
+			kfree(pos);
+	}
+	kfree(stats->time_stats);
+	kfree(stats);
+}
+
+static void lpm_stats_cleanup(struct lpm_stats *stats)
+{
+	struct lpm_stats *pstats = stats;
+
+	if (!pstats)
+		return;
+
+	while (pstats->parent)
+		pstats = pstats->parent;
+
+	debugfs_remove_recursive(pstats->directory);
+
+	cleanup_stats(pstats);
+}
+
+/**
+ * lpm_stats_config_level() - API to configure levels stats.
+ *
+ * @name:	Name of the cluster/cpu.
+ * @levels:	Low power mode level names.
+ * @num_levels:	Number of levels supported.
+ * @parent:	Pointer to the parent's lpm_stats object.
+ * @mask:	cpumask, if configuring cpu stats, else NULL.
+ *
+ * Function to communicate the low power mode levels supported by
+ * cpus or a cluster.
+ *
+ * Return: Pointer to the lpm_stats object or ERR_PTR(-ERRNO)
+ */
+struct lpm_stats *lpm_stats_config_level(const char *name,
+	const char **levels, int num_levels, struct lpm_stats *parent,
+	struct cpumask *mask)
+{
+	struct lpm_stats *stats = NULL;
+
+	if (!levels || num_levels <= 0 || IS_ERR(parent)) {
+		pr_err("%s: Invalid input\n\t\tlevels = %p\n\t\t"
+			"num_levels = %d\n\t\tparent = %ld\n",
+			__func__, levels, num_levels, PTR_ERR(parent));
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (mask)
+		stats = config_cpu_level(name, levels, num_levels, parent,
+						mask);
+	else
+		stats = config_cluster_level(name, levels, num_levels,
+						parent);
+
+	if (IS_ERR(stats)) {
+		lpm_stats_cleanup(parent);
+		return stats;
+	}
+
+	return stats;
+}
+EXPORT_SYMBOL(lpm_stats_config_level);
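+
+/*
+ * Illustrative call pattern (hypothetical names), mirroring how lpm-levels
+ * registers its hierarchy elsewhere in this patch: configure a cluster node
+ * with mask == NULL, then hang per-cpu stats off it with a cpumask.
+ *
+ *	static const char *clust_lvls[] = { "WFI", "RET", "PC" };
+ *	static const char *cpu_lvls[] = { "WFI", "PC" };
+ *	struct lpm_stats *clust;
+ *
+ *	clust = lpm_stats_config_level("L3", clust_lvls, 3, NULL, NULL);
+ *	if (!IS_ERR(clust))
+ *		lpm_stats_config_level("cpu", cpu_lvls, 2, clust,
+ *					related_cpus);
+ *
+ * where related_cpus is the cpumask of the cpus backing that node.
+ */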
+
+/**
+ * lpm_stats_cluster_enter() - API to communicate the lpm level a cluster
+ * is prepared to enter.
+ *
+ * @stats:	Pointer to the cluster's lpm_stats object.
+ * @index:	Index of the lpm level that the cluster is going to enter.
+ *
+ * Function to communicate the low power mode level that the cluster is
+ * prepared to enter.
+ */
+void lpm_stats_cluster_enter(struct lpm_stats *stats, uint32_t index)
+{
+	if (IS_ERR_OR_NULL(stats))
+		return;
+
+	update_last_in_stats(stats);
+}
+EXPORT_SYMBOL(lpm_stats_cluster_enter);
+
+/**
+ * lpm_stats_cluster_exit() - API to communicate the lpm level a cluster
+ * exited.
+ *
+ * @stats:	Pointer to the cluster's lpm_stats object.
+ * @index:	Index of the cluster lpm level.
+ * @success:	Success/Failure of the low power mode execution.
+ *
+ * Function to communicate the low power mode level that the cluster
+ * exited.
+ */
+void lpm_stats_cluster_exit(struct lpm_stats *stats, uint32_t index,
+				bool success)
+{
+	if (IS_ERR_OR_NULL(stats))
+		return;
+
+	update_exit_stats(stats, index, success);
+
+	update_first_out_stats(stats);
+}
+EXPORT_SYMBOL(lpm_stats_cluster_exit);
+
+/**
+ * lpm_stats_cpu_enter() - API to communicate the lpm level a cpu
+ * is prepared to enter.
+ *
+ * @index:	cpu's lpm level index.
+ *
+ * Function to communicate the low power mode level that the cpu is
+ * prepared to enter.
+ */
+void lpm_stats_cpu_enter(uint32_t index, uint64_t time)
+{
+	struct lpm_stats *stats = &(*this_cpu_ptr(&(cpu_stats)));
+
+	stats->sleep_time = time;
+
+	if (!stats->time_stats)
+		return;
+
+}
+EXPORT_SYMBOL(lpm_stats_cpu_enter);
+
+/**
+ * lpm_stats_cpu_exit() - API to communicate the lpm level that the cpu exited.
+ *
+ * @index:	cpu's lpm level index.
+ * @success:	Success/Failure of the low power mode execution.
+ *
+ * Function to communicate the low power mode level that the cpu exited.
+ */
+void lpm_stats_cpu_exit(uint32_t index, uint64_t time, bool success)
+{
+	struct lpm_stats *stats = &(*this_cpu_ptr(&(cpu_stats)));
+
+	if (!stats->time_stats)
+		return;
+
+	stats->sleep_time = time - stats->sleep_time;
+
+	update_exit_stats(stats, index, success);
+}
+EXPORT_SYMBOL(lpm_stats_cpu_exit);
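+
+/*
+ * Typical pairing, as used by lpm-levels elsewhere in this patch: the idle
+ * path calls lpm_stats_cpu_enter(idx, start_time) just before entering the
+ * low power mode and lpm_stats_cpu_exit(idx, end_time, success) on the way
+ * out, with both timestamps expressed in nanoseconds (ktime_to_ns()).
+ */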
+
+/**
+ * lpm_stats_suspend_enter() - API to communicate system entering suspend.
+ *
+ * Function to communicate that the system is ready to enter suspend.
+ */
+void lpm_stats_suspend_enter(void)
+{
+	struct timespec ts;
+
+	getnstimeofday(&ts);
+	suspend_time_stats.enter_time = timespec_to_ns(&ts);
+}
+EXPORT_SYMBOL(lpm_stats_suspend_enter);
+
+/**
+ * lpm_stats_suspend_exit() - API to communicate system exiting suspend.
+ *
+ * Function to communicate that the system exited suspend.
+ */
+void lpm_stats_suspend_exit(void)
+{
+	struct timespec ts;
+	uint64_t exit_time = 0;
+
+	getnstimeofday(&ts);
+	exit_time = timespec_to_ns(&ts) - suspend_time_stats.enter_time;
+	update_level_stats(&suspend_time_stats, exit_time, true);
+}
+EXPORT_SYMBOL(lpm_stats_suspend_exit);
diff --git a/drivers/soc/qcom/rpm_stats.c b/drivers/soc/qcom/rpm_stats.c
new file mode 100644
index 0000000..2f64d1f
--- /dev/null
+++ b/drivers/soc/qcom/rpm_stats.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+#include <asm/arch_timer.h>
+
+#define RPM_STATS_NUM_REC	2
+#define MSM_ARCH_TIMER_FREQ	19200000
+
+#define GET_PDATA_OF_ATTR(attr) \
+	(container_of(attr, struct msm_rpmstats_kobj_attr, ka)->pd)
+
+struct msm_rpmstats_record {
+	char name[32];
+	u32 id;
+	u32 val;
+};
+
+struct msm_rpmstats_platform_data {
+	phys_addr_t phys_addr_base;
+	u32 phys_size;
+	u32 num_records;
+};
+
+struct msm_rpmstats_private_data {
+	void __iomem *reg_base;
+	u32 num_records;
+	u32 read_idx;
+	u32 len;
+	char buf[480];
+	struct msm_rpmstats_platform_data *platform_data;
+};
+
+struct msm_rpm_stats_data {
+	u32 stat_type;
+	u32 count;
+	u64 last_entered_at;
+	u64 last_exited_at;
+	u64 accumulated;
+#if defined(CONFIG_MSM_RPM_SMD)
+	u32 client_votes;
+	u32 reserved[3];
+#endif
+};
+
+struct msm_rpmstats_kobj_attr {
+	struct kobject *kobj;
+	struct kobj_attribute ka;
+	struct msm_rpmstats_platform_data *pd;
+};
+
+static inline u64 get_time_in_sec(u64 counter)
+{
+	do_div(counter, MSM_ARCH_TIMER_FREQ);
+
+	return counter;
+}
+
+static inline u64 get_time_in_msec(u64 counter)
+{
+	do_div(counter, MSM_ARCH_TIMER_FREQ);
+	counter *= MSEC_PER_SEC;
+
+	return counter;
+}
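+
+/*
+ * Both helpers convert raw 19.2 MHz arch counter ticks: for example,
+ * 19200000 ticks is reported as 1 second by get_time_in_sec() and as
+ * 1000 ms by get_time_in_msec().
+ */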
+
+static inline int msm_rpmstats_append_data_to_buf(char *buf,
+		struct msm_rpm_stats_data *data, int buflength)
+{
+	char stat_type[5];
+	u64 time_in_last_mode;
+	u64 time_since_last_mode;
+	u64 actual_last_sleep;
+
+	stat_type[4] = 0;
+	memcpy(stat_type, &data->stat_type, sizeof(u32));
+
+	time_in_last_mode = data->last_exited_at - data->last_entered_at;
+	time_in_last_mode = get_time_in_msec(time_in_last_mode);
+	time_since_last_mode = arch_counter_get_cntvct() - data->last_exited_at;
+	time_since_last_mode = get_time_in_sec(time_since_last_mode);
+	actual_last_sleep = get_time_in_msec(data->accumulated);
+
+#if defined(CONFIG_MSM_RPM_SMD)
+	return snprintf(buf, buflength,
+		"RPM Mode:%s\n\t count:%d\ntime in last mode(msec):%llu\n"
+		"time since last mode(sec):%llu\nactual last sleep(msec):%llu\n"
+		"client votes: %#010x\n\n",
+		stat_type, data->count, time_in_last_mode,
+		time_since_last_mode, actual_last_sleep,
+		data->client_votes);
+#else
+	return snprintf(buf, buflength,
+		"RPM Mode:%s\n\t count:%d\ntime in last mode(msec):%llu\n"
+		"time since last mode(sec):%llu\nactual last sleep(msec):%llu\n\n",
+		stat_type, data->count, time_in_last_mode,
+		time_since_last_mode, actual_last_sleep);
+#endif
+}
+
+static inline u32 msm_rpmstats_read_long_register(void __iomem *regbase,
+		int index, int offset)
+{
+	return readl_relaxed(regbase + offset +
+			index * sizeof(struct msm_rpm_stats_data));
+}
+
+static inline u64 msm_rpmstats_read_quad_register(void __iomem *regbase,
+		int index, int offset)
+{
+	u64 dst;
+
+	memcpy_fromio(&dst,
+		regbase + offset + index * sizeof(struct msm_rpm_stats_data),
+		8);
+	return dst;
+}
+
+static inline int msm_rpmstats_copy_stats(
+			struct msm_rpmstats_private_data *prvdata)
+{
+	void __iomem *reg;
+	struct msm_rpm_stats_data data;
+	int i, length;
+
+	reg = prvdata->reg_base;
+
+	for (i = 0, length = 0; i < prvdata->num_records; i++) {
+		data.stat_type = msm_rpmstats_read_long_register(reg, i,
+				offsetof(struct msm_rpm_stats_data,
+					stat_type));
+		data.count = msm_rpmstats_read_long_register(reg, i,
+				offsetof(struct msm_rpm_stats_data, count));
+		data.last_entered_at = msm_rpmstats_read_quad_register(reg,
+				i, offsetof(struct msm_rpm_stats_data,
+					last_entered_at));
+		data.last_exited_at = msm_rpmstats_read_quad_register(reg,
+				i, offsetof(struct msm_rpm_stats_data,
+					last_exited_at));
+		data.accumulated = msm_rpmstats_read_quad_register(reg,
+				i, offsetof(struct msm_rpm_stats_data,
+					accumulated));
+#if defined(CONFIG_MSM_RPM_SMD)
+		data.client_votes = msm_rpmstats_read_long_register(reg,
+				i, offsetof(struct msm_rpm_stats_data,
+					client_votes));
+#endif
+
+		length += msm_rpmstats_append_data_to_buf(prvdata->buf + length,
+				&data, sizeof(prvdata->buf) - length);
+		prvdata->read_idx++;
+	}
+
+	return length;
+}
+
+static ssize_t rpmstats_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	struct msm_rpmstats_private_data prvdata;
+	struct msm_rpmstats_platform_data *pdata = NULL;
+	ssize_t length;
+
+	pdata = GET_PDATA_OF_ATTR(attr);
+
+	prvdata.reg_base = ioremap_nocache(pdata->phys_addr_base,
+					pdata->phys_size);
+	if (!prvdata.reg_base) {
+		pr_err("ERROR could not ioremap start=%pa, len=%u\n",
+				&pdata->phys_addr_base, pdata->phys_size);
+		return -EBUSY;
+	}
+
+	prvdata.read_idx = prvdata.len = 0;
+	prvdata.platform_data = pdata;
+	prvdata.num_records = pdata->num_records;
+
+	if (prvdata.read_idx < prvdata.num_records)
+		prvdata.len = msm_rpmstats_copy_stats(&prvdata);
+
+	length = scnprintf(buf, PAGE_SIZE, "%s", prvdata.buf);
+	iounmap(prvdata.reg_base);
+	return length;
+}
+
+static int msm_rpmstats_create_sysfs(struct platform_device *pdev,
+				struct msm_rpmstats_platform_data *pd)
+{
+	struct kobject *rpmstats_kobj = NULL;
+	struct msm_rpmstats_kobj_attr *rpms_ka = NULL;
+	int ret = 0;
+
+	rpmstats_kobj = kobject_create_and_add("system_sleep", power_kobj);
+	if (!rpmstats_kobj) {
+		pr_err("Cannot create rpmstats kobject\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	rpms_ka = kzalloc(sizeof(*rpms_ka), GFP_KERNEL);
+	if (!rpms_ka) {
+		kobject_put(rpmstats_kobj);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	rpms_ka->kobj = rpmstats_kobj;
+
+	sysfs_attr_init(&rpms_ka->ka.attr);
+	rpms_ka->pd = pd;
+	rpms_ka->ka.attr.mode = 0444;
+	rpms_ka->ka.attr.name = "stats";
+	rpms_ka->ka.show = rpmstats_show;
+	rpms_ka->ka.store = NULL;
+
+	ret = sysfs_create_file(rpmstats_kobj, &rpms_ka->ka.attr);
+	platform_set_drvdata(pdev, rpms_ka);
+
+fail:
+	return ret;
+}
+
+static int msm_rpmstats_probe(struct platform_device *pdev)
+{
+	struct msm_rpmstats_platform_data *pdata;
+	struct resource *res = NULL, *offset = NULL;
+	u32 offset_addr = 0;
+	void __iomem *phys_ptr = NULL;
+	char *key;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	key = "phys_addr_base";
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!res)
+		return -EINVAL;
+
+	key = "offset_addr";
+	offset = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (offset) {
+		/* Remap the rpm-stats pointer */
+		phys_ptr = ioremap_nocache(offset->start, SZ_4);
+		if (!phys_ptr) {
+			pr_err("Failed to ioremap offset address\n");
+			return -ENODEV;
+		}
+		offset_addr = readl_relaxed(phys_ptr);
+		iounmap(phys_ptr);
+	}
+
+	pdata->phys_addr_base  = res->start + offset_addr;
+	pdata->phys_size = resource_size(res);
+
+	key = "qcom,num-records";
+	if (of_property_read_u32(pdev->dev.of_node, key, &pdata->num_records))
+		pdata->num_records = RPM_STATS_NUM_REC;
+
+	msm_rpmstats_create_sysfs(pdev, pdata);
+
+	return 0;
+}
+
+static int msm_rpmstats_remove(struct platform_device *pdev)
+{
+	struct msm_rpmstats_kobj_attr *rpms_ka;
+
+	if (!pdev)
+		return -EINVAL;
+
+	rpms_ka = (struct msm_rpmstats_kobj_attr *)
+			platform_get_drvdata(pdev);
+
+	sysfs_remove_file(rpms_ka->kobj, &rpms_ka->ka.attr);
+	kobject_put(rpms_ka->kobj);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id rpm_stats_table[] = {
+	{ .compatible = "qcom,rpm-stats" },
+	{ },
+};
+
+static struct platform_driver msm_rpmstats_driver = {
+	.probe = msm_rpmstats_probe,
+	.remove = msm_rpmstats_remove,
+	.driver = {
+		.name = "msm_rpm_stat",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_stats_table,
+	},
+};
+builtin_platform_driver(msm_rpmstats_driver);
diff --git a/drivers/soc/qcom/rpmh_master_stat.c b/drivers/soc/qcom/rpmh_master_stat.c
new file mode 100644
index 0000000..86694f3
--- /dev/null
+++ b/drivers/soc/qcom/rpmh_master_stat.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/uaccess.h>
+#include <linux/soc/qcom/smem.h>
+#include <asm/arch_timer.h>
+#include "rpmh_master_stat.h"
+
+#define UNIT_DIST 0x14
+#define REG_VALID 0x0
+#define REG_DATA_LO 0x4
+#define REG_DATA_HI 0x8
+
+#define GET_ADDR(REG, UNIT_NO) (REG + (UNIT_DIST * UNIT_NO))
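
Each master-profiling unit occupies a 0x14-byte record, with the valid flag at offset 0x0 and the low/high data words at 0x4/0x8. A standalone hosted-C sketch, for illustration only and using the same constants as above, of the offsets GET_ADDR() yields:

#include <stdio.h>

#define UNIT_DIST 0x14
#define REG_VALID 0x0
#define REG_DATA_LO 0x4
#define REG_DATA_HI 0x8
#define GET_ADDR(REG, UNIT_NO) (REG + (UNIT_DIST * UNIT_NO))

int main(void)
{
	int unit;

	for (unit = 0; unit < 4; unit++)
		printf("unit %d: valid=0x%02x lo=0x%02x hi=0x%02x\n", unit,
		       GET_ADDR(REG_VALID, unit),
		       GET_ADDR(REG_DATA_LO, unit),
		       GET_ADDR(REG_DATA_HI, unit));
	return 0;
}
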
+
+enum master_smem_id {
+	MPSS = 605,
+	ADSP,
+	CDSP,
+	SLPI,
+	GPU,
+	DISPLAY,
+};
+
+enum master_pid {
+	PID_APSS = 0,
+	PID_MPSS = 1,
+	PID_ADSP = 2,
+	PID_SLPI = 3,
+	PID_CDSP = 5,
+	PID_GPU = PID_APSS,
+	PID_DISPLAY = PID_APSS,
+};
+
+enum profile_data {
+	POWER_DOWN_START,
+	POWER_UP_END,
+	POWER_DOWN_END,
+	POWER_UP_START,
+	NUM_UNIT,
+};
+
+struct msm_rpmh_master_data {
+	char *master_name;
+	enum master_smem_id smem_id;
+	enum master_pid pid;
+};
+
+static const struct msm_rpmh_master_data rpmh_masters[] = {
+	{"MPSS", MPSS, PID_MPSS},
+	{"ADSP", ADSP, PID_ADSP},
+	{"CDSP", CDSP, PID_CDSP},
+	{"SLPI", SLPI, PID_SLPI},
+	{"GPU", GPU, PID_GPU},
+	{"DISPLAY", DISPLAY, PID_DISPLAY},
+};
+
+struct msm_rpmh_master_stats {
+	uint32_t version_id;
+	uint32_t counts;
+	uint64_t last_entered;
+	uint64_t last_exited;
+	uint64_t accumulated_duration;
+};
+
+struct msm_rpmh_profile_unit {
+	uint64_t value;
+	uint64_t valid;
+};
+
+struct rpmh_master_stats_prv_data {
+	struct kobj_attribute ka;
+	struct kobject *kobj;
+};
+
+static struct msm_rpmh_master_stats apss_master_stats;
+static void __iomem *rpmh_unit_base;
+
+static DEFINE_MUTEX(rpmh_stats_mutex);
+
+static ssize_t msm_rpmh_master_stats_print_data(char *prvbuf, ssize_t length,
+				struct msm_rpmh_master_stats *record,
+				const char *name)
+{
+	uint64_t accumulated_duration = record->accumulated_duration;
+	/*
+	 * If a master is still asleep when the sleep stats are read from
+	 * SMEM, add the time spent in the current sleep to the accumulated
+	 * duration so the reported value reflects the actual sleep time and
+	 * stays accurate when used to compute battery utilization.
+	 */
+	if (record->last_entered > record->last_exited)
+		accumulated_duration +=
+				(arch_counter_get_cntvct()
+				- record->last_entered);
+
+	return scnprintf(prvbuf, length, "%s\n\tVersion:0x%x\n"
+			"\tSleep Count:0x%x\n"
+			"\tSleep Last Entered At:0x%llx\n"
+			"\tSleep Last Exited At:0x%llx\n"
+			"\tSleep Accumulated Duration:0x%llx\n\n",
+			name, record->version_id, record->counts,
+			record->last_entered, record->last_exited,
+			accumulated_duration);
+}
+
+static ssize_t msm_rpmh_master_stats_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf)
+{
+	ssize_t length;
+	int i = 0;
+	size_t size = 0;
+	struct msm_rpmh_master_stats *record = NULL;
+
+	mutex_lock(&rpmh_stats_mutex);
+
+	/* First Read APSS master stats */
+
+	length = msm_rpmh_master_stats_print_data(buf, PAGE_SIZE,
+						&apss_master_stats, "APSS");
+
+	/* Read SMEM data written by other masters */
+
+	for (i = 0; i < ARRAY_SIZE(rpmh_masters); i++) {
+		record = (struct msm_rpmh_master_stats *) qcom_smem_get(
+					rpmh_masters[i].pid,
+					rpmh_masters[i].smem_id, &size);
+		if (!IS_ERR_OR_NULL(record) && (PAGE_SIZE - length > 0))
+			length += msm_rpmh_master_stats_print_data(
+					buf + length, PAGE_SIZE - length,
+					record,
+					rpmh_masters[i].master_name);
+	}
+
+	mutex_unlock(&rpmh_stats_mutex);
+
+	return length;
+}
+
+static inline void msm_rpmh_apss_master_stats_update(
+				struct msm_rpmh_profile_unit *profile_unit)
+{
+	apss_master_stats.counts++;
+	apss_master_stats.last_entered = profile_unit[POWER_DOWN_END].value;
+	apss_master_stats.last_exited = profile_unit[POWER_UP_START].value;
+	apss_master_stats.accumulated_duration +=
+					(apss_master_stats.last_exited
+					- apss_master_stats.last_entered);
+}
+
+void msm_rpmh_master_stats_update(void)
+{
+	int i;
+	struct msm_rpmh_profile_unit profile_unit[NUM_UNIT];
+
+	if (!rpmh_unit_base)
+		return;
+
+	for (i = POWER_DOWN_END; i < NUM_UNIT; i++) {
+		profile_unit[i].valid = readl_relaxed(rpmh_unit_base +
+						GET_ADDR(REG_VALID, i));
+
+		/*
+		 * Do not update the APSS stats if the valid bit is not set:
+		 * it means the APSS did not execute the cx-off sequence,
+		 * which can happen if the entry fell through at some point.
+		 */
+
+		if (!(profile_unit[i].valid & BIT(REG_VALID)))
+			return;
+
+		profile_unit[i].value = readl_relaxed(rpmh_unit_base +
+						GET_ADDR(REG_DATA_LO, i));
+		profile_unit[i].value |= ((uint64_t)
+					readl_relaxed(rpmh_unit_base +
+					GET_ADDR(REG_DATA_HI, i)) << 32);
+	}
+	msm_rpmh_apss_master_stats_update(profile_unit);
+}
+EXPORT_SYMBOL(msm_rpmh_master_stats_update);
+
+static int msm_rpmh_master_stats_probe(struct platform_device *pdev)
+{
+	struct rpmh_master_stats_prv_data *prvdata = NULL;
+	struct kobject *rpmh_master_stats_kobj = NULL;
+	int ret = -ENOMEM;
+
+	if (!pdev)
+		return -EINVAL;
+
+	prvdata = devm_kzalloc(&pdev->dev, sizeof(*prvdata), GFP_KERNEL);
+	if (!prvdata)
+		return ret;
+
+	rpmh_master_stats_kobj = kobject_create_and_add(
+					"rpmh_stats",
+					power_kobj);
+	if (!rpmh_master_stats_kobj)
+		return ret;
+
+	prvdata->kobj = rpmh_master_stats_kobj;
+
+	sysfs_attr_init(&prvdata->ka.attr);
+	prvdata->ka.attr.mode = 0444;
+	prvdata->ka.attr.name = "master_stats";
+	prvdata->ka.show = msm_rpmh_master_stats_show;
+	prvdata->ka.store = NULL;
+
+	ret = sysfs_create_file(prvdata->kobj, &prvdata->ka.attr);
+	if (ret) {
+		pr_err("sysfs_create_file failed\n");
+		goto fail_sysfs;
+	}
+
+	rpmh_unit_base = of_iomap(pdev->dev.of_node, 0);
+	if (!rpmh_unit_base) {
+		pr_err("Failed to get rpmh_unit_base\n");
+		ret = -ENOMEM;
+		goto fail_iomap;
+	}
+
+	apss_master_stats.version_id = 0x1;
+	platform_set_drvdata(pdev, prvdata);
+	return ret;
+
+fail_iomap:
+	sysfs_remove_file(prvdata->kobj, &prvdata->ka.attr);
+fail_sysfs:
+	kobject_put(prvdata->kobj);
+	return ret;
+}
+
+static int msm_rpmh_master_stats_remove(struct platform_device *pdev)
+{
+	struct rpmh_master_stats_prv_data *prvdata;
+
+	if (!pdev)
+		return -EINVAL;
+
+	prvdata = (struct rpmh_master_stats_prv_data *)
+				platform_get_drvdata(pdev);
+
+	sysfs_remove_file(prvdata->kobj, &prvdata->ka.attr);
+	kobject_put(prvdata->kobj);
+	platform_set_drvdata(pdev, NULL);
+	iounmap(rpmh_unit_base);
+	rpmh_unit_base = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id rpmh_master_table[] = {
+	{.compatible = "qcom,rpmh-master-stats-v1"},
+	{},
+};
+
+static struct platform_driver msm_rpmh_master_stats_driver = {
+	.probe	= msm_rpmh_master_stats_probe,
+	.remove = msm_rpmh_master_stats_remove,
+	.driver = {
+		.name = "msm_rpmh_master_stats",
+		.of_match_table = rpmh_master_table,
+	},
+};
+
+module_platform_driver(msm_rpmh_master_stats_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPMH Master Statistics driver");
+MODULE_ALIAS("platform:msm_rpmh_master_stat_log");
diff --git a/drivers/soc/qcom/rpmh_master_stat.h b/drivers/soc/qcom/rpmh_master_stat.h
new file mode 100644
index 0000000..a331eef
--- /dev/null
+++ b/drivers/soc/qcom/rpmh_master_stat.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCOM_RPMH_MASTER_STAT_H__
+#define __QCOM_RPMH_MASTER_STAT_H__
+
+#if defined(CONFIG_QTI_RPM_STATS_LOG)
+
+void msm_rpmh_master_stats_update(void);
+
+#else
+
+static inline void msm_rpmh_master_stats_update(void) {}
+
+#endif
+
+#endif /* __QCOM_RPMH_MASTER_STAT_H__ */
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
new file mode 100644
index 0000000..f00219f
--- /dev/null
+++ b/drivers/soc/qcom/system_pm.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/rpmh.h>
+#include <clocksource/arm_arch_timer.h>
+#include <soc/qcom/lpm_levels.h>
+
+#include "rpmh_master_stat.h"
+
+#define PDC_TIME_VALID_SHIFT	31
+#define PDC_TIME_UPPER_MASK	0xFFFFFF
+
+static struct device *dev;
+
+static int setup_wakeup(uint32_t lo, uint32_t hi)
+{
+	struct tcs_cmd cmd[2] = { { 0 } };
+
+	cmd[0].data =  hi & PDC_TIME_UPPER_MASK;
+	cmd[0].data |= 1 << PDC_TIME_VALID_SHIFT;
+	cmd[1].data = lo;
+
+	return rpmh_write_pdc_data(dev, cmd, ARRAY_SIZE(cmd));
+}
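
setup_wakeup() splits a 64-bit wakeup time across the two PDC data words: the first carries the upper 24 bits plus a valid flag in bit 31, the second the low 32 bits. A hosted-C sketch of that packing; the helper name and test value are illustrative:

#include <stdint.h>
#include <stdio.h>

#define PDC_TIME_VALID_SHIFT	31
#define PDC_TIME_UPPER_MASK	0xFFFFFF

/* Pack a 64-bit timer compare value the way setup_wakeup() does. */
static void pack_pdc_wakeup(uint64_t cval, uint32_t data[2])
{
	uint32_t hi = (uint32_t)(cval >> 32);
	uint32_t lo = (uint32_t)cval;

	data[0] = (hi & PDC_TIME_UPPER_MASK) | (1U << PDC_TIME_VALID_SHIFT);
	data[1] = lo;
}

int main(void)
{
	uint32_t data[2];

	pack_pdc_wakeup(0x12345678ABCDEFULL, data);
	printf("hi word: 0x%08x, lo word: 0x%08x\n", data[0], data[1]);
	return 0;
}
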
+
+static int system_sleep_update_wakeup(bool from_idle)
+{
+	uint32_t lo = ~0U, hi = ~0U;
+
+	/* Read the hardware to get the most accurate value */
+	arch_timer_mem_get_cval(&lo, &hi);
+
+	return setup_wakeup(lo, hi);
+}
+
+/**
+ * system_sleep_allowed() - Returns true if it is okay to enter system low power modes
+ */
+static bool system_sleep_allowed(void)
+{
+	return (rpmh_ctrlr_idle(dev) == 0);
+}
+
+/**
+ * system_sleep_enter() - Activities performed when entering system low power modes
+ *
+ * Returns 0 for success or error values from writing the sleep/wake values to
+ * the hardware block.
+ */
+static int system_sleep_enter(struct cpumask *mask)
+{
+	return rpmh_flush(dev);
+}
+
+/**
+ * system_sleep_exit() - Activities done when exiting system low power modes
+ */
+static void system_sleep_exit(void)
+{
+	msm_rpmh_master_stats_update();
+}
+
+static struct system_pm_ops pm_ops = {
+	.enter = system_sleep_enter,
+	.exit = system_sleep_exit,
+	.update_wakeup = system_sleep_update_wakeup,
+	.sleep_allowed = system_sleep_allowed,
+};
+
+static int sys_pm_probe(struct platform_device *pdev)
+{
+	dev = &pdev->dev;
+	return register_system_pm_ops(&pm_ops);
+}
+
+static const struct of_device_id sys_pm_drv_match[] = {
+	{ .compatible = "qcom,system-pm", },
+	{ }
+};
+
+static struct platform_driver sys_pm_driver = {
+	.probe = sys_pm_probe,
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.suppress_bind_attrs = true,
+		.of_match_table = sys_pm_drv_match,
+	},
+};
+builtin_platform_driver(sys_pm_driver);
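
The ops registered here are consumed by the lpm-levels driver during system-wide low power mode entry. The real call sites live in lpm-levels.c; the sketch below only illustrates the expected ordering, and everything except struct system_pm_ops is a made-up name:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <soc/qcom/lpm_levels.h>

/* Illustrative consumer only: approximates how lpm-levels is expected
 * to call the hooks around a system-wide low power mode. */
static struct system_pm_ops *example_sys_ops;

static int example_system_sleep(struct cpumask *mask, bool from_idle)
{
	int ret;

	if (!example_sys_ops || !example_sys_ops->sleep_allowed())
		return -EBUSY;

	/* Program the PDC wakeup before losing the local timer. */
	ret = example_sys_ops->update_wakeup(from_idle);
	if (ret)
		return ret;

	/* Flush cached RPMH sleep/wake votes to hardware. */
	ret = example_sys_ops->enter(mask);
	if (ret)
		return ret;

	/* ... cluster power collapse and resume happen here ... */

	/* Refresh the APSS master sleep statistics on the way out. */
	example_sys_ops->exit();
	return 0;
}
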
diff --git a/include/soc/qcom/event_timer.h b/include/soc/qcom/event_timer.h
new file mode 100644
index 0000000..9b37669
--- /dev/null
+++ b/include/soc/qcom/event_timer.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2012, 2014,2017 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_EVENT_TIMER_H
+#define __ARCH_ARM_MACH_MSM_EVENT_TIMER_H
+
+#include <linux/hrtimer.h>
+
+struct event_timer_info;
+
+#ifdef CONFIG_MSM_EVENT_TIMER
+/**
+ * add_event_timer() : Add a wakeup event. Intended to be called
+ *                     by clients once. Returns a handle to be used
+ *                     for future transactions.
+ * @irq : Interrupt number to track affinity.
+ * @function : The callback function to be called when the event
+ *             timer expires.
+ * @data : Callback data provided by the client.
+ */
+struct event_timer_info *add_event_timer(uint32_t irq,
+				void (*function)(void *), void *data);
+
+/**
+ * activate_event_timer() : Set the expiration time for an event in absolute
+ *                          ktime. This is a one-shot event timer; clients
+ *                          should call this again to set another expiration.
+ * @event : Event handle.
+ * @event_time : Event time in absolute ktime.
+ */
+void activate_event_timer(struct event_timer_info *event, ktime_t event_time);
+
+/**
+ * deactivate_event_timer() : Deactivate an event timer.
+ * @event: event handle.
+ */
+void deactivate_event_timer(struct event_timer_info *event);
+
+/**
+ * destroy_event_timer() : Free the event info data structure allocated during
+ * add_event_timer().
+ * @event: event handle.
+ */
+void destroy_event_timer(struct event_timer_info *event);
+
+/**
+ * get_next_event_time() : Get the next wakeup event for the given CPU.
+ *                         Returns the ktime value of the next
+ *                         expiring event.
+ */
+ktime_t get_next_event_time(int cpu);
+#else
+static inline struct event_timer_info *add_event_timer(uint32_t irq,
+				void (*function)(void *), void *data)
+{
+	return NULL;
+}
+
+static inline void activate_event_timer(struct event_timer_info *event,
+					ktime_t event_time) {}
+
+static inline void deactivate_event_timer(struct event_timer_info *event) {}
+
+static inline void destroy_event_timer(struct event_timer_info *event) {}
+
+static inline ktime_t get_next_event_time(int cpu)
+{
+	return ns_to_ktime(0);
+}
+
+#endif /* CONFIG_MSM_EVENT_TIMER */
+#endif /* __ARCH_ARM_MACH_MSM_EVENT_TIMER_H */
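
A hedged client sketch of this interface showing the intended add/activate flow; the IRQ number, callback, and 5 ms delay are illustrative:

#include <linux/errno.h>
#include <linux/ktime.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
#include <soc/qcom/event_timer.h>

static struct event_timer_info *example_event;

static void example_event_cb(void *data)
{
	/* Invoked when the event timer expires. */
}

static int example_arm_event_timer(uint32_t irq)
{
	example_event = add_event_timer(irq, example_event_cb, NULL);
	if (!example_event)
		return -ENOMEM;

	/* One-shot: arm for roughly 5 ms from now, in absolute ktime. */
	activate_event_timer(example_event,
			     ktime_add_ns(ktime_get(), 5 * NSEC_PER_MSEC));
	return 0;
}
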
diff --git a/include/soc/qcom/lpm-stats.h b/include/soc/qcom/lpm-stats.h
new file mode 100644
index 0000000..4bc4d7b8
--- /dev/null
+++ b/include/soc/qcom/lpm-stats.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2014-2015,2017 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_LPM_STATS_H
+#define __ARCH_ARM_MACH_MSM_LPM_STATS_H
+
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct lpm_stats;
+
+#define MAX_STR_LEN 256
+
+struct lifo_stats {
+	uint32_t last_in;
+	uint32_t first_out;
+};
+
+struct lpm_stats {
+	char name[MAX_STR_LEN];
+	struct level_stats *time_stats;
+	uint32_t num_levels;
+	struct lifo_stats lifo;
+	struct lpm_stats *parent;
+	struct list_head sibling;
+	struct list_head child;
+	struct cpumask mask;
+	struct dentry *directory;
+	int64_t sleep_time;
+	bool is_cpu;
+};
+
+
+#ifdef CONFIG_MSM_IDLE_STATS
+struct lpm_stats *lpm_stats_config_level(const char *name,
+	const char **levels, int num_levels, struct lpm_stats *parent,
+	struct cpumask *mask);
+void lpm_stats_cluster_enter(struct lpm_stats *stats, uint32_t index);
+void lpm_stats_cluster_exit(struct lpm_stats *stats, uint32_t index,
+				bool success);
+void lpm_stats_cpu_enter(uint32_t index, uint64_t time);
+void lpm_stats_cpu_exit(uint32_t index, uint64_t time, bool success);
+void lpm_stats_suspend_enter(void);
+void lpm_stats_suspend_exit(void);
+#else
+static inline struct lpm_stats *lpm_stats_config_level(const char *name,
+	const char **levels, int num_levels, struct lpm_stats *parent,
+	struct cpumask *mask)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void lpm_stats_cluster_enter(struct lpm_stats *stats,
+						uint32_t index)
+{ }
+
+static inline void lpm_stats_cluster_exit(struct lpm_stats *stats,
+					uint32_t index, bool success)
+{ }
+
+static inline void lpm_stats_cpu_enter(uint32_t index, uint64_t time)
+{ }
+
+static inline void lpm_stats_cpu_exit(uint32_t index, uint64_t time,
+							bool success)
+{ }
+
+static inline void lpm_stats_suspend_enter(void)
+{ }
+
+static inline void lpm_stats_suspend_exit(void)
+{ }
+#endif
+#endif  /* __ARCH_ARM_MACH_MSM_LPM_STATS_H */
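
A hedged sketch of how an idle driver might use this interface: configure a per-CPU stats node, then bracket each low power entry with the enter/exit hooks. The level names, indices, and example_* helpers are illustrative, a real caller would pass the cluster's stats node as the parent, and the exact meaning of the time argument is defined by lpm-stats.c.

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <soc/qcom/lpm-stats.h>

static struct lpm_stats *example_cpu_stats;
static struct cpumask example_mask;

static int example_register_cpu_stats(int cpu)
{
	const char *levels[] = { "wfi", "pc" };	/* illustrative level names */

	cpumask_clear(&example_mask);
	cpumask_set_cpu(cpu, &example_mask);

	example_cpu_stats = lpm_stats_config_level("cpu", levels,
					ARRAY_SIZE(levels), NULL,
					&example_mask);
	return PTR_ERR_OR_ZERO(example_cpu_stats);
}

static void example_cpu_idle(uint32_t index, uint64_t time, bool success)
{
	lpm_stats_cpu_enter(index, time);
	/* ... actual low power entry and exit ... */
	lpm_stats_cpu_exit(index, time, success);
}
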
diff --git a/include/soc/qcom/lpm_levels.h b/include/soc/qcom/lpm_levels.h
new file mode 100644
index 0000000..21a3644
--- /dev/null
+++ b/include/soc/qcom/lpm_levels.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SOC_QCOM_LPM_LEVEL_H__
+#define __SOC_QCOM_LPM_LEVEL_H__
+
+#include <linux/cpumask.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct system_pm_ops {
+	int (*enter)(struct cpumask *mask);
+	void (*exit)(void);
+	int (*update_wakeup)(bool from_idle);
+	bool (*sleep_allowed)(void);
+};
+
+#ifdef CONFIG_MSM_PM
+uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops);
+#else
+static inline uint32_t register_system_pm_ops(struct system_pm_ops *pm_ops)
+{ return -ENODEV; }
+#endif
+
+#endif
diff --git a/include/soc/qcom/pm.h b/include/soc/qcom/pm.h
new file mode 100644
index 0000000..c77c8ef
--- /dev/null
+++ b/include/soc/qcom/pm.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_PM_H
+#define __ARCH_ARM_MACH_MSM_PM_H
+
+#include <linux/types.h>
+#include <linux/cpuidle.h>
+#include <dt-bindings/msm/pm.h>
+
+struct latency_level {
+	int affinity_level;
+	int reset_level;
+	const char *level_name;
+};
+
+#ifdef CONFIG_MSM_PM
+
+s32 msm_cpuidle_get_deep_idle_latency(void);
+
+/**
+ * lpm_get_latency() - API to get latency for a low power mode
+ * @level:	pointer to a struct latency_level with the fields below
+ * affinity_level: The level (CPU/L2/CCI etc.) for which the
+ *	latency is required.
+ *	LPM_AFF_LVL_CPU : CPU level
+ *	LPM_AFF_LVL_L2  : L2 level
+ *	LPM_AFF_LVL_CCI : CCI level
+ * reset_level: Can be passed "LPM_RESET_LVL_GDHS" for
+ *	low power mode with control logic power collapse or
+ *	"LPM_RESET_LVL_PC" for low power mode with control and
+ *	memory logic power collapse or "LPM_RESET_LVL_RET" for
+ *	retention mode.
+ * level_name: Pointer to the cluster name for which the latency
+ *	is required, or NULL if the minimum value across all the
+ *	clusters is to be returned. For the CPU level, pass the name
+ *	of the parent L2 cluster. For CCI it has no effect.
+ * @latency:	address where the latency value is returned.
+ *
+ * The returned latency is for the specified cluster, or the minimum
+ * across all clusters at the given affinity_level and reset_level.
+ *
+ * Return: 0 for success; Error number for failure.
+ */
+int lpm_get_latency(struct latency_level *level, uint32_t *latency);
+
+#else
+
+static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
+
+static inline int lpm_get_latency(struct latency_level *level,
+						uint32_t *latency)
+{
+	return 0;
+}
+#endif
+
+#endif  /* __ARCH_ARM_MACH_MSM_PM_H */
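
A hedged sketch of a lpm_get_latency() query for CPU power-collapse exit latency; "perf-cluster" is a hypothetical cluster name and the LPM_* constants are the dt-bindings values referenced in the comment above:

#include <linux/types.h>
#include <soc/qcom/pm.h>

static int example_query_pc_latency(uint32_t *latency_us)
{
	struct latency_level lvl = {
		.affinity_level = LPM_AFF_LVL_CPU,
		.reset_level = LPM_RESET_LVL_PC,
		.level_name = "perf-cluster",	/* hypothetical cluster name */
	};

	return lpm_get_latency(&lvl, latency_us);
}
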
diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h
new file mode 100644
index 0000000..ea4b86d
--- /dev/null
+++ b/include/trace/events/trace_msm_low_power.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_low_power
+
+#if !defined(_TRACE_MSM_LOW_POWER_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSM_LOW_POWER_H_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cpu_power_select,
+
+	TP_PROTO(int index, u32 sleep_us, u32 latency, u32 next_event_us),
+
+	TP_ARGS(index, sleep_us, latency, next_event_us),
+
+	TP_STRUCT__entry(
+		__field(int, index)
+		__field(u32, sleep_us)
+		__field(u32, latency)
+		__field(u32, next_event_us)
+	),
+
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->sleep_us = sleep_us;
+		__entry->latency = latency;
+		__entry->next_event_us = next_event_us;
+	),
+
+	TP_printk("idx:%d sleep_time:%u latency:%u next_event:%u",
+		__entry->index, __entry->sleep_us, __entry->latency,
+		__entry->next_event_us)
+);
+
+TRACE_EVENT(cpu_pred_select,
+
+	TP_PROTO(u32 predtype, u64 predicted, u32 tmr_time),
+
+	TP_ARGS(predtype, predicted, tmr_time),
+
+	TP_STRUCT__entry(
+		__field(u32, predtype)
+		__field(u64, predicted)
+		__field(u32, tmr_time)
+	),
+
+	TP_fast_assign(
+		__entry->predtype = predtype;
+		__entry->predicted = predicted;
+		__entry->tmr_time = tmr_time;
+	),
+
+	TP_printk("pred:%u time:%lu tmr_time:%u",
+		__entry->predtype, (unsigned long)__entry->predicted,
+		__entry->tmr_time)
+);
+
+TRACE_EVENT(cpu_pred_hist,
+
+	TP_PROTO(int idx, u32 resi, u32 sample, u32 tmr),
+
+	TP_ARGS(idx, resi, sample, tmr),
+
+	TP_STRUCT__entry(
+		__field(int, idx)
+		__field(u32, resi)
+		__field(u32, sample)
+		__field(u32, tmr)
+	),
+
+	TP_fast_assign(
+		__entry->idx = idx;
+		__entry->resi = resi;
+		__entry->sample = sample;
+		__entry->tmr = tmr;
+	),
+
+	TP_printk("idx:%d resi:%u sample:%u tmr:%u",
+		__entry->idx, __entry->resi,
+		__entry->sample, __entry->tmr)
+);
+
+TRACE_EVENT(cpu_idle_enter,
+
+	TP_PROTO(int index),
+
+	TP_ARGS(index),
+
+	TP_STRUCT__entry(
+		__field(int, index)
+	),
+
+	TP_fast_assign(
+		__entry->index = index;
+	),
+
+	TP_printk("idx:%d",
+		__entry->index)
+);
+
+TRACE_EVENT(cpu_idle_exit,
+
+	TP_PROTO(int index, bool success),
+
+	TP_ARGS(index, success),
+
+	TP_STRUCT__entry(
+		__field(int, index)
+		__field(bool, success)
+	),
+
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->success = success;
+	),
+
+	TP_printk("idx:%d success:%d",
+		__entry->index,
+		__entry->success)
+);
+
+TRACE_EVENT(cluster_enter,
+
+	TP_PROTO(const char *name, int index, unsigned long sync_cpus,
+		unsigned long child_cpus, bool from_idle),
+
+	TP_ARGS(name, index, sync_cpus, child_cpus, from_idle),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(int, index)
+		__field(unsigned long, sync_cpus)
+		__field(unsigned long, child_cpus)
+		__field(bool, from_idle)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->index = index;
+		__entry->sync_cpus = sync_cpus;
+		__entry->child_cpus = child_cpus;
+		__entry->from_idle = from_idle;
+	),
+
+	TP_printk("cluster_name:%s idx:%d sync:0x%lx child:0x%lx idle:%d",
+		__entry->name,
+		__entry->index,
+		__entry->sync_cpus,
+		__entry->child_cpus,
+		__entry->from_idle)
+);
+
+TRACE_EVENT(cluster_exit,
+
+	TP_PROTO(const char *name, int index, unsigned long sync_cpus,
+		unsigned long child_cpus, bool from_idle),
+
+	TP_ARGS(name, index, sync_cpus, child_cpus, from_idle),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(int, index)
+		__field(unsigned long, sync_cpus)
+		__field(unsigned long, child_cpus)
+		__field(bool, from_idle)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->index = index;
+		__entry->sync_cpus = sync_cpus;
+		__entry->child_cpus = child_cpus;
+		__entry->from_idle = from_idle;
+	),
+
+	TP_printk("cluster_name:%s idx:%d sync:0x%lx child:0x%lx idle:%d",
+		__entry->name,
+		__entry->index,
+		__entry->sync_cpus,
+		__entry->child_cpus,
+		__entry->from_idle)
+);
+
+TRACE_EVENT(cluster_pred_select,
+
+	TP_PROTO(const char *name, int index, u32 sleep_us,
+				u32 latency, int pred, u32 pred_us),
+
+	TP_ARGS(name, index, sleep_us, latency, pred, pred_us),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(int, index)
+		__field(u32, sleep_us)
+		__field(u32, latency)
+		__field(int, pred)
+		__field(u32, pred_us)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->index = index;
+		__entry->sleep_us = sleep_us;
+		__entry->latency = latency;
+		__entry->pred = pred;
+		__entry->pred_us = pred_us;
+	),
+
+	TP_printk("name:%s idx:%d sleep_time:%u latency:%u pred:%d pred_us:%u",
+		__entry->name, __entry->index, __entry->sleep_us,
+		__entry->latency, __entry->pred, __entry->pred_us)
+);
+
+TRACE_EVENT(cluster_pred_hist,
+
+	TP_PROTO(const char *name, int idx, u32 resi,
+					u32 sample, u32 tmr),
+
+	TP_ARGS(name, idx, resi, sample, tmr),
+
+	TP_STRUCT__entry(
+		__field(const char *, name)
+		__field(int, idx)
+		__field(u32, resi)
+		__field(u32, sample)
+		__field(u32, tmr)
+	),
+
+	TP_fast_assign(
+		__entry->name = name;
+		__entry->idx = idx;
+		__entry->resi = resi;
+		__entry->sample = sample;
+		__entry->tmr = tmr;
+	),
+
+	TP_printk("name:%s idx:%d resi:%u sample:%u tmr:%u",
+		__entry->name, __entry->idx, __entry->resi,
+		__entry->sample, __entry->tmr)
+);
+
+#endif /* _TRACE_MSM_LOW_POWER_H_ */
+#define TRACE_INCLUDE_FILE trace_msm_low_power
+#include <trace/define_trace.h>
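
As with any trace header, exactly one compilation unit must define CREATE_TRACE_POINTS before including it (the LPM driver is the natural home); other users include the header directly. A hedged sketch of emitting the CPU idle events:

#define CREATE_TRACE_POINTS
#include <trace/events/trace_msm_low_power.h>

static void example_trace_cpu_idle(int idx, bool success)
{
	trace_cpu_idle_enter(idx);
	/* ... enter and exit the selected low power mode ... */
	trace_cpu_idle_exit(idx, success);
}
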