Merge "drivers: cpuidle: lpm-levels: Support for different CPUs in a cluster"
diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
index ae476d0..797dbcc 100644
--- a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
+++ b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt
@@ -28,9 +28,6 @@
- qcom,default-level: The default low power level that a cluster is
programmed. The SPM of the corresponding device is configured at this
low power mode by default.
- - qcom,cpu: List of CPU phandles to identify the CPUs associated with
- this cluster. This property is required if and only if the cluster
- node contains a qcom,pm-cpu node.
qcom,pm-cluster contains qcom,pm-cluster-level nodes which identify
the various low power modes that the cluster can enter. The
@@ -103,9 +100,13 @@
power collapse (PC)
[Node bindings for qcom,pm-cpu]
-qcom,pm-cpu contains the low power modes that a cpu could enter. Currently it
-doesn't have any required properties and is a container for
-qcom,pm-cpu-levels.
+qcom,pm-cpu contains the low power modes that a CPU could enter and the CPUs
+that share these parameters. It contains the following properties.
+	- qcom,cpu: List of CPU phandles to identify the CPUs that share the
+	low power levels defined under this node.
+ - qcom,pm-cpu-levels: The different low power modes that a CPU could
+ enter. The following section explains the required properties of this
+ node.
[Node bindings for qcom,pm-cpu-levels]
Required properties:
@@ -184,7 +185,6 @@
label = "a53";
qcom,spm-device-names = "l2";
qcom,default-level=<0>;
- qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
qcom,pm-cluster-level@0{
reg = <0>;
@@ -210,6 +210,7 @@
qcom,pm-cpu {
#address-cells = <1>;
#size-cells = <0>;
+ qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
qcom,pm-cpu-level@0 {
reg = <0>;
qcom,spm-cpu-mode = "wfi";
@@ -255,7 +256,6 @@
label = "a57";
qcom,spm-device-names = "l2";
qcom,default-level=<0>;
- qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
qcom,pm-cluster-level@0{
reg = <0>;
@@ -281,6 +281,7 @@
qcom,pm-cpu {
#address-cells = <1>;
#size-cells = <0>;
+ qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
qcom,pm-cpu-level@0 {
reg = <0>;
qcom,spm-cpu-mode = "wfi";
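
Note on the binding change above: a cluster no longer carries a flat
qcom,cpu list. Each qcom,pm-cpu group now names its own CPUs, and the
cluster's CPU mask is derived as the union of its groups. A minimal sketch
of that aggregation, assuming the lpm_cpu/lpm_cluster layout from
lpm-levels.h (the driver does the equivalent inside parse_cpu_levels(),
further down in this patch):

	/* Cluster mask = union of every pm-cpu group's related_cpus. */
	static void collect_child_cpus(struct lpm_cluster *c)
	{
		struct lpm_cpu *cpu;

		cpumask_clear(&c->child_cpus);
		list_for_each_entry(cpu, &c->cpu, list)
			cpumask_or(&c->child_cpus, &c->child_cpus,
					&cpu->related_cpus);
	}
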
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index 6806145..6215771 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -23,8 +23,6 @@
#size-cells = <0>;
label = "L3";
qcom,spm-device-names = "L3";
- qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5 &CPU6
- &CPU7>;
qcom,psci-mode-shift = <4>;
qcom,psci-mode-mask = <0xfff>;
@@ -86,12 +84,64 @@
qcom,is-reset;
qcom,notify-rpm;
};
-
- qcom,pm-cpu {
+ qcom,pm-cpu@0 {
#address-cells = <1>;
#size-cells = <0>;
qcom,psci-mode-shift = <0>;
qcom,psci-mode-mask = <0xf>;
+ qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
+
+ qcom,pm-cpu-level@0 { /* C1 */
+ reg = <0>;
+ qcom,spm-cpu-mode = "wfi";
+ qcom,psci-cpu-mode = <0x1>;
+ qcom,latency-us = <43>;
+ qcom,ss-power = <454>;
+ qcom,energy-overhead = <38639>;
+ qcom,time-overhead = <83>;
+ };
+
+ qcom,pm-cpu-level@1 { /* C2D */
+ reg = <1>;
+ qcom,psci-cpu-mode = <0x2>;
+ qcom,spm-cpu-mode = "ret";
+ qcom,latency-us = <86>;
+ qcom,ss-power = <449>;
+ qcom,energy-overhead = <78456>;
+ qcom,time-overhead = <167>;
+ };
+
+ qcom,pm-cpu-level@2 { /* C3 */
+ reg = <2>;
+ qcom,spm-cpu-mode = "pc";
+ qcom,psci-cpu-mode = <0x3>;
+ qcom,latency-us = <612>;
+ qcom,ss-power = <436>;
+ qcom,energy-overhead = <418225>;
+ qcom,time-overhead = <885>;
+ qcom,is-reset;
+ qcom,use-broadcast-timer;
+ };
+
+ qcom,pm-cpu-level@3 { /* C4 */
+ reg = <3>;
+ qcom,spm-cpu-mode = "rail-pc";
+ qcom,psci-cpu-mode = <0x4>;
+ qcom,latency-us = <700>;
+ qcom,ss-power = <400>;
+ qcom,energy-overhead = <428225>;
+ qcom,time-overhead = <1000>;
+ qcom,is-reset;
+ qcom,use-broadcast-timer;
+ };
+ };
+
+ qcom,pm-cpu@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,psci-mode-shift = <0>;
+ qcom,psci-mode-mask = <0xf>;
+ qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
qcom,pm-cpu-level@0 { /* C1 */
reg = <0>;
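
The per-level numbers above feed the driver's break-even decision: a level
is worth entering only when the expected sleep exceeds the residency
computed from qcom,ss-power, qcom,energy-overhead and qcom,time-overhead.
A standalone sketch of that arithmetic, assuming the formula of
calculate_residency() in lpm-levels-of.c (only its tail is visible in the
hunks below); for the C1 to C2D step above it prints 503, so C2D only pays
off for sleeps longer than roughly 503 us:

	#include <stdio.h>
	#include <stdint.h>

	struct power_params {
		int32_t ss_power;        /* qcom,ss-power        */
		int32_t energy_overhead; /* qcom,energy-overhead */
		int32_t time_overhead;   /* qcom,time-overhead   */
	};

	/* Break-even sleep length for moving from `base` to `next`. */
	static int32_t residency(const struct power_params *base,
				 const struct power_params *next)
	{
		int32_t r = (next->energy_overhead - base->energy_overhead) -
			(next->ss_power * next->time_overhead -
			 base->ss_power * base->time_overhead);

		return r / (base->ss_power - next->ss_power);
	}

	int main(void)
	{
		struct power_params c1  = { 454, 38639,  83 }; /* C1 above  */
		struct power_params c2d = { 449, 78456, 167 }; /* C2D above */

		printf("break-even: %d us\n", residency(&c1, &c2d));
		return 0;
	}
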
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index ed239c4..39e0484 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -21,7 +21,6 @@
#include <linux/moduleparam.h>
#include "lpm-levels.h"
-bool use_psci;
enum lpm_type {
IDLE = 0,
SUSPEND,
@@ -306,6 +305,7 @@
struct lpm_level_avail *level_list = NULL;
char cpu_name[20] = {0};
int ret = 0;
+	struct lpm_cpu *lpm_cpu;
cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) *
cpumask_weight(&p->child_cpus), GFP_KERNEL);
@@ -313,38 +313,45 @@
return -ENOMEM;
cpu_idx = 0;
- for_each_cpu(cpu, &p->child_cpus) {
- snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
- cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent);
- if (!cpu_kobj[cpu_idx]) {
- ret = -ENOMEM;
- goto release_kobj;
- }
+	list_for_each_entry(lpm_cpu, &p->cpu, list) {
- level_list = devm_kzalloc(&lpm_pdev->dev,
- p->cpu->nlevels * sizeof(*level_list),
- GFP_KERNEL);
- if (!level_list) {
- ret = -ENOMEM;
- goto release_kobj;
- }
-
- /*
- * Skip enable/disable for WFI. cpuidle expects WFI to be
- * available at all times.
- */
- for (i = 1; i < p->cpu->nlevels; i++) {
-
- level_list[i].latency_us = p->levels[i].pwr.latency_us;
- ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
- cpu_kobj[cpu_idx], &level_list[i],
- (void *)p->cpu, cpu, true);
- if (ret)
+ for_each_cpu(cpu, &lpm_cpu->related_cpus) {
+ snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
+ cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name,
+ parent);
+ if (!cpu_kobj[cpu_idx]) {
+ ret = -ENOMEM;
goto release_kobj;
- }
+ }
- cpu_level_available[cpu] = level_list;
- cpu_idx++;
+ level_list = devm_kzalloc(&lpm_pdev->dev,
+ lpm_cpu->nlevels * sizeof(*level_list),
+ GFP_KERNEL);
+ if (!level_list) {
+ ret = -ENOMEM;
+ goto release_kobj;
+ }
+
+ /*
+ * Skip enable/disable for WFI. cpuidle expects WFI to
+ * be available at all times.
+ */
+ for (i = 1; i < lpm_cpu->nlevels; i++) {
+				level_list[i].latency_us =
+					lpm_cpu->levels[i].pwr.latency_us;
+ ret = create_lvl_avail_nodes(
+ lpm_cpu->levels[i].name,
+ cpu_kobj[cpu_idx],
+ &level_list[i],
+ (void *)lpm_cpu, cpu, true);
+ if (ret)
+ goto release_kobj;
+ }
+
+ cpu_level_available[cpu] = level_list;
+ cpu_idx++;
+ }
}
return ret;
@@ -385,7 +392,7 @@
return ret;
}
- if (p->cpu) {
+ if (!list_empty(&p->cpu)) {
ret = create_cpu_lvl_nodes(p, cluster_kobj);
if (ret)
return ret;
@@ -431,30 +438,27 @@
return ret;
}
- if (use_psci) {
- key = "qcom,psci-mode-shift";
- ret = of_property_read_u32(node, key,
- &c->psci_mode_shift);
- if (ret) {
- pr_err("%s(): Failed to read param: %s\n",
- __func__, key);
- return ret;
- }
+ key = "qcom,psci-mode-shift";
+ ret = of_property_read_u32(node, key,
+ &c->psci_mode_shift);
+ if (ret) {
+ pr_err("%s(): Failed to read param: %s\n",
+ __func__, key);
+ return ret;
+ }
- key = "qcom,psci-mode-mask";
- ret = of_property_read_u32(node, key,
- &c->psci_mode_mask);
- if (ret) {
- pr_err("%s(): Failed to read param: %s\n",
- __func__, key);
- return ret;
- }
+ key = "qcom,psci-mode-mask";
+ ret = of_property_read_u32(node, key,
+ &c->psci_mode_mask);
+ if (ret) {
+ pr_err("%s(): Failed to read param: %s\n",
+ __func__, key);
+ return ret;
+ }
- /* Set ndevice to 1 as default */
- c->ndevices = 1;
+ /* Set ndevice to 1 as default */
+ c->ndevices = 1;
- } else
- pr_warn("Target supports PSCI only\n");
return 0;
}
@@ -503,22 +507,14 @@
if (ret)
goto failed;
- if (use_psci) {
- char *k = "qcom,psci-mode";
+ key = "qcom,psci-mode";
- ret = of_property_read_u32(node, k, &level->psci_id);
- if (ret)
- goto failed;
-
- level->is_reset = of_property_read_bool(node, "qcom,is-reset");
- } else
- pr_warn("Build supports PSCI targets only");
-
- key = "label";
- ret = of_property_read_string(node, key, &level->level_name);
+ ret = of_property_read_u32(node, key, &level->psci_id);
if (ret)
goto failed;
+ level->is_reset = of_property_read_bool(node, "qcom,is-reset");
+
if (cluster->nlevels != cluster->default_level) {
key = "min child idx";
ret = of_property_read_u32(node, "qcom,min-child-idx",
@@ -531,10 +527,6 @@
}
level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm");
- level->disable_dynamic_routing = of_property_read_bool(node,
- "qcom,disable-dynamic-int-routing");
- level->last_core_only = of_property_read_bool(node,
- "qcom,last-core-only");
key = "parse_power_params";
ret = parse_power_params(node, &level->pwr);
@@ -569,20 +561,16 @@
return ret;
}
- if (use_psci) {
- key = "qcom,psci-cpu-mode";
+ key = "qcom,psci-cpu-mode";
+ ret = of_property_read_u32(n, key, &l->psci_id);
+ if (ret) {
+ pr_err("Failed reading %s on device %s\n", key,
+ n->name);
+ return ret;
+ }
+ key = "qcom,hyp-psci";
- ret = of_property_read_u32(n, key, &l->psci_id);
- if (ret) {
- pr_err("Failed reading %s on device %s\n", key,
- n->name);
- return ret;
- }
- key = "qcom,hyp-psci";
-
- l->hyp_psci = of_property_read_bool(n, key);
- } else
- pr_warn("Build supports PSCI targets only");
+ l->hyp_psci = of_property_read_bool(n, key);
return 0;
}
@@ -639,51 +627,26 @@
next_pwr->time_overhead_us : residency;
}
-static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
+static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu)
{
struct device_node *n;
- int ret = -ENOMEM;
- int i, j;
- char *key;
-
- c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL);
- if (!c->cpu)
- return ret;
-
- c->cpu->parent = c;
- if (use_psci) {
-
- key = "qcom,psci-mode-shift";
-
- ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift);
- if (ret) {
- pr_err("Failed reading %s on device %s\n", key,
- node->name);
- return ret;
- }
- key = "qcom,psci-mode-mask";
-
- ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask);
- if (ret) {
- pr_err("Failed reading %s on device %s\n", key,
- node->name);
- return ret;
- }
- }
+ int ret, i, j;
+ const char *key;
for_each_child_of_node(node, n) {
- struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels];
+ struct lpm_cpu_level *l = &cpu->levels[cpu->nlevels];
- c->cpu->nlevels++;
+ cpu->nlevels++;
ret = parse_cpu_mode(n, l);
if (ret < 0) {
pr_info("Failed %s\n", l->name);
- goto failed;
+ return ret;
}
ret = parse_power_params(n, &l->pwr);
if (ret)
- goto failed;
+ return ret;
key = "qcom,use-broadcast-timer";
l->use_bc_timer = of_property_read_bool(n, key);
@@ -698,32 +661,83 @@
if (ret == -EINVAL)
l->reset_level = LPM_RESET_LVL_NONE;
else if (ret)
- goto failed;
+ return ret;
}
- for (i = 0; i < c->cpu->nlevels; i++) {
- for (j = 0; j < c->cpu->nlevels; j++) {
+ for (i = 0; i < cpu->nlevels; i++) {
+ for (j = 0; j < cpu->nlevels; j++) {
if (i >= j) {
- c->cpu->levels[i].pwr.residencies[j] = 0;
+ cpu->levels[i].pwr.residencies[j] = 0;
continue;
}
- c->cpu->levels[i].pwr.residencies[j] =
- calculate_residency(&c->cpu->levels[i].pwr,
- &c->cpu->levels[j].pwr);
+ cpu->levels[i].pwr.residencies[j] =
+ calculate_residency(&cpu->levels[i].pwr,
+ &cpu->levels[j].pwr);
pr_err("%s: idx %d %u\n", __func__, j,
- c->cpu->levels[i].pwr.residencies[j]);
+ cpu->levels[i].pwr.residencies[j]);
}
}
+ for_each_cpu(i, &cpu->related_cpus) {
+ per_cpu(max_residency, i) = devm_kzalloc(&lpm_pdev->dev,
+ sizeof(uint32_t) * cpu->nlevels,
+ GFP_KERNEL);
+ if (!per_cpu(max_residency, i))
+ return -ENOMEM;
+ per_cpu(min_residency, i) = devm_kzalloc(
+ &lpm_pdev->dev,
+ sizeof(uint32_t) * cpu->nlevels,
+ GFP_KERNEL);
+ if (!per_cpu(min_residency, i))
+ return -ENOMEM;
+ set_optimum_cpu_residency(cpu, i, true);
+ }
return 0;
-failed:
- for (i = 0; i < c->cpu->nlevels; i++) {
- kfree(c->cpu->levels[i].name);
- c->cpu->levels[i].name = NULL;
+}
+
+static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c)
+{
+ int ret = -ENOMEM, i;
+ char *key;
+ struct lpm_cpu *cpu;
+
+ cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu), GFP_KERNEL);
+ if (!cpu)
+ return ret;
+
+ if (get_cpumask_for_node(node, &cpu->related_cpus))
+ return -EINVAL;
+
+ cpu->parent = c;
+
+ key = "qcom,psci-mode-shift";
+ ret = of_property_read_u32(node, key, &cpu->psci_mode_shift);
+ if (ret) {
+ pr_err("Failed reading %s on device %s\n", key,
+ node->name);
+ return ret;
}
- kfree(c->cpu);
- c->cpu = NULL;
+ key = "qcom,psci-mode-mask";
+
+ ret = of_property_read_u32(node, key, &cpu->psci_mode_mask);
+ if (ret) {
+ pr_err("Failed reading %s on device %s\n", key,
+ node->name);
+ return ret;
+ }
+
+ if (parse_cpu(node, cpu))
+ goto failed;
+ cpumask_or(&c->child_cpus, &c->child_cpus, &cpu->related_cpus);
+ list_add(&cpu->list, &c->cpu);
+ return 0;
+failed:
+ for (i = 0; i < cpu->nlevels; i++) {
+ kfree(cpu->levels[i].name);
+ cpu->levels[i].name = NULL;
+ }
+ kfree(cpu);
pr_err("%s(): Failed with error code:%d\n", __func__, ret);
return ret;
}
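
parse_cpu_levels() now resolves its own related_cpus mask through
get_cpumask_for_node(), which is not part of this diff. A sketch of what
that helper is assumed to do (map each qcom,cpu phandle to a logical CPU;
the real implementation lives elsewhere in lpm-levels-of.c):

	static int get_cpumask_for_node(struct device_node *node,
					struct cpumask *mask)
	{
		struct device_node *cpu_node;
		int cpu, idx = 0;

		cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
		if (!cpu_node)
			return -ENOENT;

		while (cpu_node) {
			for_each_possible_cpu(cpu) {
				if (of_get_cpu_node(cpu, NULL) == cpu_node) {
					cpumask_set_cpu(cpu, mask);
					break;
				}
			}
			of_node_put(cpu_node);
			cpu_node = of_parse_phandle(node, "qcom,cpu", idx++);
		}
		return 0;
	}
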
@@ -731,6 +745,7 @@
void free_cluster_node(struct lpm_cluster *cluster)
{
struct list_head *list;
+ struct lpm_cpu *cpu, *n;
int i;
list_for_each(list, &cluster->child) {
@@ -741,22 +756,21 @@
free_cluster_node(n);
};
- if (cluster->cpu) {
- for (i = 0; i < cluster->cpu->nlevels; i++) {
- kfree(cluster->cpu->levels[i].name);
- cluster->cpu->levels[i].name = NULL;
+	list_for_each_entry_safe(cpu, n, &cluster->cpu, list) {
+ for (i = 0; i < cpu->nlevels; i++) {
+ kfree(cpu->levels[i].name);
+ cpu->levels[i].name = NULL;
}
+		list_del(&cpu->list);
}
for (i = 0; i < cluster->nlevels; i++) {
kfree(cluster->levels[i].mode);
cluster->levels[i].mode = NULL;
}
- kfree(cluster->cpu);
kfree(cluster->name);
- kfree(cluster->lpm_dev);
- cluster->cpu = NULL;
cluster->name = NULL;
- cluster->lpm_dev = NULL;
cluster->ndevices = 0;
}
@@ -785,6 +799,7 @@
goto failed_parse_params;
INIT_LIST_HEAD(&c->child);
+ INIT_LIST_HEAD(&c->cpu);
c->parent = parent;
spin_lock_init(&c->sync_lock);
c->min_child_level = NR_LPM_LEVELS;
@@ -795,7 +810,6 @@
continue;
key = "qcom,pm-cluster-level";
if (!of_node_cmp(n->name, key)) {
- WARN_ON(!use_psci && c->no_saw_devices);
if (parse_cluster_level(n, c))
goto failed_parse_cluster;
continue;
@@ -805,7 +819,6 @@
if (!of_node_cmp(n->name, key)) {
struct lpm_cluster *child;
- WARN_ON(!use_psci && c->no_saw_devices);
child = parse_cluster(n, c);
if (!child)
goto failed_parse_cluster;
@@ -819,34 +832,11 @@
key = "qcom,pm-cpu";
if (!of_node_cmp(n->name, key)) {
- /*
- * Parse the the cpu node only if a pm-cpu node
- * is available, though the mask is defined @ the
- * cluster level
- */
- if (get_cpumask_for_node(node, &c->child_cpus))
- goto failed_parse_cluster;
-
if (parse_cpu_levels(n, c))
goto failed_parse_cluster;
c->aff_level = 1;
- for_each_cpu(i, &c->child_cpus) {
- per_cpu(max_residency, i) = devm_kzalloc(
- &lpm_pdev->dev,
- sizeof(uint32_t) * c->cpu->nlevels,
- GFP_KERNEL);
- if (!per_cpu(max_residency, i))
- return ERR_PTR(-ENOMEM);
- per_cpu(min_residency, i) = devm_kzalloc(
- &lpm_pdev->dev,
- sizeof(uint32_t) * c->cpu->nlevels,
- GFP_KERNEL);
- if (!per_cpu(min_residency, i))
- return ERR_PTR(-ENOMEM);
- set_optimum_cpu_residency(c->cpu, i, true);
- }
}
}
@@ -883,8 +873,6 @@
{
struct device_node *top = NULL;
- use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci");
-
top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster");
if (!top) {
pr_err("Failed to find root node\n");
@@ -898,6 +886,7 @@
void cluster_dt_walkthrough(struct lpm_cluster *cluster)
{
struct list_head *list;
+ struct lpm_cpu *cpu;
int i, j;
static int id;
char str[10] = {0};
@@ -918,12 +907,12 @@
&cluster->name[j], &l->mode[i]);
}
- if (cluster->cpu) {
+ list_for_each_entry(cpu, &cluster->cpu, list) {
pr_info("%d\n", __LINE__);
- for (j = 0; j < cluster->cpu->nlevels; j++)
+ for (j = 0; j < cpu->nlevels; j++)
pr_info("%s\tCPU mode: %s id:%d\n", str,
- cluster->cpu->levels[j].name,
- cluster->cpu->levels[j].mode);
+ cpu->levels[j].name,
+ cpu->levels[j].mode);
}
id++;
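
The companion change in lpm-levels.c below swaps the per-CPU cpu_cluster
pointer for a per-CPU lpm_cpu pointer, so the idle and hotplug paths
resolve a CPU's own level table first and reach the cluster through
->parent. A sketch of the new lookup, assuming the definitions in that
file:

	static DEFINE_PER_CPU(struct lpm_cpu *, cpu_lpm);

	/* The owning cluster is now one hop from the CPU's group. */
	static inline struct lpm_cluster *cluster_of(unsigned int cpu)
	{
		struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, cpu);

		return lpm_cpu ? lpm_cpu->parent : NULL;
	}
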
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index e091fd8..e890192 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -53,10 +53,8 @@
#include <trace/events/trace_msm_low_power.h>
#define SCLK_HZ (32768)
-#define SCM_HANDOFF_LOCK_ID "S:7"
#define PSCI_POWER_STATE(reset) (reset << 30)
#define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24)
-static remote_spinlock_t scm_handoff_lock;
enum {
MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
@@ -106,7 +104,7 @@
static DEFINE_PER_CPU(struct lpm_history, hist);
-static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster);
+static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm);
static bool suspend_in_progress;
static struct hrtimer lpm_hrtimer;
static struct hrtimer histtimer;
@@ -209,7 +207,7 @@
struct power_params *pwr_params;
struct lpm_cpu *cpu;
struct lpm_cluster *n;
- uint32_t latency = 0;
+ uint32_t lat = 0;
int i;
list_for_each(list, child) {
@@ -218,19 +216,21 @@
if (strcmp(lat_level->level_name, n->cluster_name))
continue;
}
- cpu = n->cpu;
- for (i = 0; i < cpu->nlevels; i++) {
- level = &cpu->levels[i];
- pwr_params = &level->pwr;
- if (lat_level->reset_level == level->reset_level) {
- if ((latency > pwr_params->latency_us)
- || (!latency))
- latency = pwr_params->latency_us;
- break;
+ list_for_each_entry(cpu, &n->cpu, list) {
+ for (i = 0; i < cpu->nlevels; i++) {
+ level = &cpu->levels[i];
+ pwr_params = &level->pwr;
+ if (lat_level->reset_level
+ == level->reset_level) {
+ if ((lat > pwr_params->latency_us)
+ || (!lat))
+ lat = pwr_params->latency_us;
+ break;
+ }
}
}
}
- return latency;
+ return lat;
}
static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster,
@@ -239,9 +239,9 @@
struct lpm_cluster *n;
if ((cluster->aff_level == affinity_level)
- || ((cluster->cpu) && (affinity_level == 0)))
+ || ((!list_empty(&cluster->cpu)) && (affinity_level == 0)))
return cluster;
- else if (!cluster->cpu) {
+ else if (list_empty(&cluster->cpu)) {
n = list_entry(cluster->child.next, typeof(*n), list);
return cluster_aff_match(n, affinity_level);
} else
@@ -316,7 +316,7 @@
static int lpm_dying_cpu(unsigned int cpu)
{
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+ struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
cluster_prepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
return 0;
@@ -324,7 +324,7 @@
static int lpm_starting_cpu(unsigned int cpu)
{
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+ struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
return 0;
@@ -378,7 +378,7 @@
static void clusttimer_cancel(void)
{
int cpu = raw_smp_processor_id();
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+ struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent;
hrtimer_try_to_cancel(&cluster->histtimer);
@@ -414,22 +414,6 @@
hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
}
-static int set_device_mode(struct lpm_cluster *cluster, int ndevice,
- struct lpm_cluster_level *level)
-{
- struct low_power_ops *ops;
-
- if (use_psci)
- return 0;
-
- ops = &cluster->lpm_dev[ndevice];
- if (ops && ops->set_mode)
- return ops->set_mode(ops, level->mode[ndevice],
- level->notify_rpm);
- else
- return -EINVAL;
-}
-
static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
struct lpm_cpu *cpu, int *idx_restrict,
uint32_t *idx_restrict_time)
@@ -953,10 +937,6 @@
if (!lpm_cluster_mode_allow(cluster, i, from_idle))
continue;
- if (level->last_core_only &&
- cpumask_weight(cpu_online_mask) > 1)
- continue;
-
if (!cpumask_equal(&cluster->num_children_in_sync,
&level->num_cpu_votes))
continue;
@@ -1001,7 +981,6 @@
bool from_idle, int predicted)
{
struct lpm_cluster_level *level = &cluster->levels[idx];
- int ret, i;
if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus)
|| is_IPI_pending(&cluster->num_children_in_sync)) {
@@ -1022,25 +1001,12 @@
ktime_to_us(ktime_get()));
}
- for (i = 0; i < cluster->ndevices; i++) {
- ret = set_device_mode(cluster, i, level);
- if (ret)
- goto failed_set_mode;
- }
if (level->notify_rpm) {
- struct cpumask nextcpu, *cpumask;
uint64_t us;
uint32_t pred_us;
- us = get_cluster_sleep_time(cluster, &nextcpu,
- from_idle, &pred_us);
- cpumask = level->disable_dynamic_routing ? NULL : &nextcpu;
-
- if (ret) {
- pr_info("Failed msm_rpm_enter_sleep() rc = %d\n", ret);
- goto failed_set_mode;
- }
-
+ us = get_cluster_sleep_time(cluster, NULL, from_idle,
+ &pred_us);
us = us + 1;
clear_predict_history();
clear_cl_predict_history();
@@ -1061,17 +1027,6 @@
}
return 0;
-failed_set_mode:
-
- for (i = 0; i < cluster->ndevices; i++) {
- int rc = 0;
-
- level = &cluster->levels[cluster->default_level];
- // rc = set_device_mode(cluster, i, level);
- WARN_ON(rc);
- }
-
- return ret;
}
static void cluster_prepare(struct lpm_cluster *cluster,
@@ -1151,7 +1106,7 @@
{
struct lpm_cluster_level *level;
bool first_cpu;
- int last_level, i, ret;
+ int last_level, i;
if (!cluster)
return;
@@ -1201,13 +1156,8 @@
last_level = cluster->last_level;
cluster->last_level = cluster->default_level;
- for (i = 0; i < cluster->ndevices; i++) {
+ for (i = 0; i < cluster->ndevices; i++)
level = &cluster->levels[cluster->default_level];
- ret = set_device_mode(cluster, i, level);
-
- WARN_ON(ret);
-
- }
cluster_notify(cluster, &cluster->levels[last_level], false);
@@ -1220,12 +1170,11 @@
spin_unlock(&cluster->sync_lock);
}
-static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index,
+static inline void cpu_prepare(struct lpm_cpu *cpu, int cpu_index,
bool from_idle)
{
- struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
- bool jtag_save_restore =
- cluster->cpu->levels[cpu_index].jtag_save_restore;
+ struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
+ bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore;
/* Use broadcast timer for aggregating sleep mode within a cluster.
* A broadcast timer could be used in the following scenarios
@@ -1253,12 +1202,11 @@
msm_jtag_save_state();
}
-static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index,
+static inline void cpu_unprepare(struct lpm_cpu *cpu, int cpu_index,
bool from_idle)
{
- struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index];
- bool jtag_save_restore =
- cluster->cpu->levels[cpu_index].jtag_save_restore;
+ struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
+ bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore;
if (from_idle && cpu_level->use_bc_timer)
tick_broadcast_exit();
@@ -1304,13 +1252,12 @@
return state_id;
}
-#if !defined(CONFIG_CPU_V7)
-bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
+static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle)
{
int affinity_level = 0;
- int state_id = get_cluster_id(cluster, &affinity_level);
+ int state_id = get_cluster_id(cpu->parent, &affinity_level);
int power_state =
- PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
+ PSCI_POWER_STATE(cpu->levels[idx].is_reset);
bool success = false;
/*
* idx = 0 is the default LPM state
@@ -1324,7 +1271,7 @@
affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
state_id |= (power_state | affinity_level
- | cluster->cpu->levels[idx].psci_id);
+ | cpu->levels[idx].psci_id);
update_debug_pc_event(CPU_ENTER, state_id,
0xdeaffeed, 0xdeaffeed, true);
@@ -1335,52 +1282,17 @@
success, 0xdeaffeed, true);
return success;
}
-#elif defined(CONFIG_ARM_PSCI)
-bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
-{
- int affinity_level = 0;
- int state_id = get_cluster_id(cluster, &affinity_level);
- int power_state =
- PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset);
- bool success = false;
-
- if (!idx) {
- stop_critical_timings();
- wfi();
- start_critical_timings();
- return 1;
- }
-
- affinity_level = PSCI_AFFINITY_LEVEL(affinity_level);
- state_id |= (power_state | affinity_level
- | cluster->cpu->levels[idx].psci_id);
-
- update_debug_pc_event(CPU_ENTER, state_id,
- 0xdeaffeed, 0xdeaffeed, true);
- stop_critical_timings();
- success = !arm_cpuidle_suspend(state_id);
- start_critical_timings();
- update_debug_pc_event(CPU_EXIT, state_id,
- success, 0xdeaffeed, true);
-}
-#else
-bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle)
-{
- WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n");
- return false;
-}
-#endif
static int lpm_cpuidle_select(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+ struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
int idx;
- if (!cluster)
+ if (!cpu)
return 0;
- idx = cpu_power_select(dev, cluster->cpu);
+ idx = cpu_power_select(dev, cpu);
if (idx < 0)
return 0;
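
psci_enter_sleep() composes the PSCI state id from the chosen level's
psci_id, placed per node according to qcom,psci-mode-shift/mask, plus
power-state and affinity-level bits. A standalone illustration using the
sdm845 values above; the 0x4 L3-level id is hypothetical, and the walk
performed by get_cluster_id() is assumed rather than shown in this diff:

	#include <stdio.h>
	#include <stdint.h>

	#define PSCI_POWER_STATE(reset)  ((uint32_t)(reset) << 30)
	#define PSCI_AFFINITY_LEVEL(lvl) (((uint32_t)(lvl) & 0x3) << 24)

	int main(void)
	{
		/* CPU level C3: id 0x3, shift 0, mask 0xf (dtsi above). */
		uint32_t cpu_bits = (0x3u & 0xf) << 0;
		/* Hypothetical L3 level id 0x4, shift 4, mask 0xfff.    */
		uint32_t l3_bits  = (0x4u & 0xfff) << 4;
		uint32_t state_id = PSCI_POWER_STATE(1) |
				PSCI_AFFINITY_LEVEL(1) | l3_bits | cpu_bits;

		printf("state_id = 0x%08x\n", state_id); /* 0x41000043 */
		return 0;
	}
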
@@ -1424,18 +1336,18 @@
static int lpm_cpuidle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu);
+ struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
bool success = true;
const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
int64_t start_time = ktime_to_ns(ktime_get()), end_time;
struct power_params *pwr_params;
- pwr_params = &cluster->cpu->levels[idx].pwr;
+ pwr_params = &cpu->levels[idx].pwr;
-	pwr_params = &cluster->cpu->levels[idx].pwr;
- cpu_prepare(cluster, idx, true);
- cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get()));
+ cpu_prepare(cpu, idx, true);
+ cluster_prepare(cpu->parent, cpumask, idx, true, start_time);
trace_cpu_idle_enter(idx);
lpm_stats_cpu_enter(idx, start_time);
@@ -1443,15 +1355,14 @@
if (need_resched() || (idx < 0))
goto exit;
- WARN_ON(!use_psci);
- success = psci_enter_sleep(cluster, idx, true);
+ success = psci_enter_sleep(cpu, idx, true);
exit:
end_time = ktime_to_ns(ktime_get());
lpm_stats_cpu_exit(idx, end_time, success);
- cluster_unprepare(cluster, cpumask, idx, true, end_time);
- cpu_unprepare(cluster, idx, true);
+ cluster_unprepare(cpu->parent, cpumask, idx, true, end_time);
+ cpu_unprepare(cpu, idx, true);
sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0);
end_time = ktime_to_ns(ktime_get()) - start_time;
do_div(end_time, 1000);
@@ -1521,8 +1432,9 @@
int i = 0, ret = 0;
unsigned int cpu;
struct lpm_cluster *p = NULL;
+ struct lpm_cpu *lpm_cpu;
- if (!cl->cpu) {
+ if (list_empty(&cl->cpu)) {
struct lpm_cluster *n;
list_for_each_entry(n, &cl->child, list) {
@@ -1533,51 +1445,56 @@
return ret;
}
- cl->drv = kcalloc(1, sizeof(*cl->drv), GFP_KERNEL);
- if (!cl->drv)
- return -ENOMEM;
+ list_for_each_entry(lpm_cpu, &cl->cpu, list) {
+ lpm_cpu->drv = kcalloc(1, sizeof(*lpm_cpu->drv), GFP_KERNEL);
+ if (!lpm_cpu->drv)
+ return -ENOMEM;
- cl->drv->name = "msm_idle";
+ lpm_cpu->drv->name = "msm_idle";
- for (i = 0; i < cl->cpu->nlevels; i++) {
- struct cpuidle_state *st = &cl->drv->states[i];
- struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i];
+ for (i = 0; i < lpm_cpu->nlevels; i++) {
+ struct cpuidle_state *st = &lpm_cpu->drv->states[i];
+ struct lpm_cpu_level *cpu_level = &lpm_cpu->levels[i];
- snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
- snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name);
- st->flags = 0;
- st->exit_latency = cpu_level->pwr.latency_us;
- st->power_usage = cpu_level->pwr.ss_power;
- st->target_residency = 0;
- st->enter = lpm_cpuidle_enter;
- }
-
- cl->drv->state_count = cl->cpu->nlevels;
- cl->drv->safe_state_index = 0;
- for_each_cpu(cpu, &cl->child_cpus)
- per_cpu(cpu_cluster, cpu) = cl;
-
- for_each_possible_cpu(cpu) {
- if (cpu_online(cpu))
- continue;
- p = per_cpu(cpu_cluster, cpu);
- while (p) {
- int j;
-
- spin_lock(&p->sync_lock);
- cpumask_set_cpu(cpu, &p->num_children_in_sync);
- for (j = 0; j < p->nlevels; j++)
- cpumask_copy(&p->levels[j].num_cpu_votes,
- &p->num_children_in_sync);
- spin_unlock(&p->sync_lock);
- p = p->parent;
+ snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
+			snprintf(st->desc, CPUIDLE_DESC_LEN, "%s",
+					cpu_level->name);
+ st->flags = 0;
+ st->exit_latency = cpu_level->pwr.latency_us;
+ st->power_usage = cpu_level->pwr.ss_power;
+ st->target_residency = 0;
+ st->enter = lpm_cpuidle_enter;
}
- }
- ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus);
- if (ret) {
- kfree(cl->drv);
- return -ENOMEM;
+ lpm_cpu->drv->state_count = lpm_cpu->nlevels;
+ lpm_cpu->drv->safe_state_index = 0;
+ for_each_cpu(cpu, &lpm_cpu->related_cpus)
+ per_cpu(cpu_lpm, cpu) = lpm_cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu_online(cpu))
+ continue;
+ if (per_cpu(cpu_lpm, cpu))
+ p = per_cpu(cpu_lpm, cpu)->parent;
+ while (p) {
+ int j;
+
+ spin_lock(&p->sync_lock);
+ cpumask_set_cpu(cpu, &p->num_children_in_sync);
+ for (j = 0; j < p->nlevels; j++)
+ cpumask_copy(
+ &p->levels[j].num_cpu_votes,
+ &p->num_children_in_sync);
+ spin_unlock(&p->sync_lock);
+ p = p->parent;
+ }
+ }
+ ret = cpuidle_register_cpu(lpm_cpu->drv,
+ &lpm_cpu->related_cpus);
+
+ if (ret) {
+ kfree(lpm_cpu->drv);
+			return ret;
+ }
}
return 0;
}
@@ -1607,7 +1524,7 @@
level_name[i] = cpu->levels[i].name;
lpm_stats_config_level("cpu", level_name, cpu->nlevels,
- parent->stats, &parent->child_cpus);
+ parent->stats, &cpu->related_cpus);
kfree(level_name);
}
@@ -1616,8 +1533,9 @@
struct lpm_cluster *parent)
{
const char **level_name;
- int i;
struct lpm_cluster *child;
+ struct lpm_cpu *cpu;
+ int i;
if (!cl)
return;
@@ -1635,10 +1553,12 @@
kfree(level_name);
- if (cl->cpu) {
- register_cpu_lpm_stats(cl->cpu, cl);
- return;
+ list_for_each_entry(cpu, &cl->cpu, list) {
+ register_cpu_lpm_stats(cpu, cl);
}
+ if (!list_empty(&cl->cpu))
+ return;
list_for_each_entry(child, &cl->child, list)
register_cluster_lpm_stats(child, cl);
@@ -1661,8 +1581,8 @@
static int lpm_suspend_enter(suspend_state_t state)
{
int cpu = raw_smp_processor_id();
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
- struct lpm_cpu *lpm_cpu = cluster->cpu;
+ struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, cpu);
+ struct lpm_cluster *cluster = lpm_cpu->parent;
const struct cpumask *cpumask = get_cpu_mask(cpu);
int idx;
@@ -1675,7 +1595,7 @@
pr_err("Failed suspend\n");
return 0;
}
- cpu_prepare(cluster, idx, false);
+ cpu_prepare(lpm_cpu, idx, false);
cluster_prepare(cluster, cpumask, idx, false, 0);
if (idx > 0)
update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed,
@@ -1688,15 +1608,14 @@
* LPMs(XO and Vmin).
*/
- WARN_ON(!use_psci);
- psci_enter_sleep(cluster, idx, true);
+ psci_enter_sleep(lpm_cpu, idx, true);
if (idx > 0)
update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
false);
cluster_unprepare(cluster, cpumask, idx, false, 0);
- cpu_unprepare(cluster, idx, false);
+ cpu_unprepare(lpm_cpu, idx, false);
return 0;
}
@@ -1736,14 +1655,6 @@
hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cluster_timer_init(lpm_root_node);
- ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
- if (ret) {
- pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
- __func__, ret);
- put_online_cpus();
- return ret;
- }
-
size = num_dbg_elements * sizeof(struct lpm_debug);
lpm_debug = dma_alloc_coherent(&pdev->dev, size,
&lpm_debug_phys, GFP_KERNEL);
@@ -1812,54 +1723,3 @@
return rc;
}
late_initcall(lpm_levels_module_init);
-
-enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu)
-{
- struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
- enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON;
-
- /*
- * No need to acquire the lock if probe isn't completed yet
- * In the event of the hotplug happening before lpm probe, we want to
- * flush the cache to make sure that L2 is flushed. In particular, this
- * could cause incoherencies for a cluster architecture. This wouldn't
- * affect the idle case as the idle driver wouldn't be registered
- * before the probe function
- */
- if (!cluster)
- return MSM_SCM_L2_OFF;
-
- /*
- * Assumes L2 only. What/How parameters gets passed into TZ will
- * determine how this function reports this info back in msm-pm.c
- */
- spin_lock(&cluster->sync_lock);
-
- if (!cluster->lpm_dev) {
- retflag = MSM_SCM_L2_OFF;
- goto unlock_and_return;
- }
-
- if (!cpumask_equal(&cluster->num_children_in_sync,
- &cluster->child_cpus))
- goto unlock_and_return;
-
- if (cluster->lpm_dev)
- retflag = cluster->lpm_dev->tz_flag;
- /*
- * The scm_handoff_lock will be release by the secure monitor.
- * It is used to serialize power-collapses from this point on,
- * so that both Linux and the secure context have a consistent
- * view regarding the number of running cpus (cpu_count).
- *
- * It must be acquired before releasing the cluster lock.
- */
-unlock_and_return:
- update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef,
- 0xdeadbeef);
- trace_pre_pc_cb(retflag);
- remote_spin_lock_rlock_id(&scm_handoff_lock,
- REMOTE_SPINLOCK_TID_START + cpu);
- spin_unlock(&cluster->sync_lock);
- return retflag;
-}
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index 3d35ae9..c9f272e 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -17,8 +17,6 @@
#define MAXSAMPLES 5
#define CLUST_SMPL_INVLD_TIME 40000
-extern bool use_psci;
-
struct lpm_lookup_table {
uint32_t modes;
const char *mode_name;
@@ -47,10 +45,13 @@
};
struct lpm_cpu {
+ struct list_head list;
+ struct cpumask related_cpus;
struct lpm_cpu_level levels[NR_LPM_LEVELS];
int nlevels;
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
+ struct cpuidle_driver *drv;
struct lpm_cluster *parent;
};
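
With list, related_cpus and drv added to struct lpm_cpu, enumerating all
CPU idle levels under a cluster becomes a two-level iteration. A short
sketch, mirroring what cluster_dt_walkthrough() in lpm-levels-of.c does:

	static void walk_cpu_levels(struct lpm_cluster *cluster)
	{
		struct lpm_cpu *cpu;
		int i;

		/* One entry per qcom,pm-cpu group, then its levels. */
		list_for_each_entry(cpu, &cluster->cpu, list)
			for (i = 0; i < cpu->nlevels; i++)
				pr_debug("%s: %s\n", cluster->cluster_name,
						cpu->levels[i].name);
	}
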
@@ -74,21 +75,13 @@
struct cpumask num_cpu_votes;
struct power_params pwr;
bool notify_rpm;
- bool disable_dynamic_routing;
bool sync_level;
- bool last_core_only;
struct lpm_level_avail available;
unsigned int psci_id;
bool is_reset;
int reset_level;
};
-struct low_power_ops {
- struct msm_spm_device *spm;
- int (*set_mode)(struct low_power_ops *ops, int mode, bool notify_rpm);
- enum msm_pm_l2_scm_flag tz_flag;
-};
-
struct cluster_history {
uint32_t resi[MAXSAMPLES];
int mode[MAXSAMPLES];
@@ -108,16 +101,13 @@
const char *cluster_name;
const char **name;
unsigned long aff_level; /* Affinity level of the node */
- struct low_power_ops *lpm_dev;
int ndevices;
struct lpm_cluster_level levels[NR_LPM_LEVELS];
int nlevels;
- enum msm_pm_l2_scm_flag l2_flag;
int min_child_level;
int default_level;
int last_level;
- struct lpm_cpu *cpu;
- struct cpuidle_driver *drv;
+ struct list_head cpu;
spinlock_t sync_lock;
struct cpumask child_cpus;
struct cpumask num_children_in_sync;
@@ -125,14 +115,10 @@
struct lpm_stats *stats;
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
- bool no_saw_devices;
struct cluster_history history;
struct hrtimer histtimer;
};
-int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
-int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
-int set_l3_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
void lpm_suspend_wake_time(uint64_t wakeup_time);
struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
diff --git a/include/soc/qcom/pm.h b/include/soc/qcom/pm.h
index a82ada6..58d011e 100644
--- a/include/soc/qcom/pm.h
+++ b/include/soc/qcom/pm.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
* Author: San Mehat <san@android.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -70,16 +70,6 @@
};
/**
- * lpm_cpu_pre_pc_cb(): API to get the L2 flag to pass to TZ
- *
- * @cpu: cpuid of the CPU going down.
- *
- * Returns the l2 flush flag enum that is passed down to TZ during power
- * collaps
- */
-enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu);
-
-/**
* msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed.
* @cpu: CPU on which to check for the sleep mode.
* @mode: Sleep Mode to check for.