Merge "msm: pm: Adjust for events with hard wakeups"
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index f7456ef..8218a42 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -92,8 +92,8 @@
}
static void *msm_lpm_lowest_limits(bool from_idle,
- enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
- uint32_t sleep_us, uint32_t *power)
+ enum msm_pm_sleep_mode sleep_mode,
+ struct msm_pm_time_params *time_param, uint32_t *power)
{
unsigned int cpu = smp_processor_id();
struct msm_rpmrs_level *best_level = NULL;
@@ -114,20 +114,22 @@
if (sleep_mode != level->sleep_mode)
continue;
- if (latency_us < level->latency_us)
+ if (time_param->latency_us < level->latency_us)
continue;
- if (sleep_us <= 1) {
+ if (time_param->sleep_us <= 1) {
pwr = level->energy_overhead;
- } else if (sleep_us <= level->time_overhead_us) {
- pwr = level->energy_overhead / sleep_us;
- } else if ((sleep_us >> 10) > level->time_overhead_us) {
+ } else if (time_param->sleep_us <= level->time_overhead_us) {
+ pwr = level->energy_overhead / time_param->sleep_us;
+ } else if ((time_param->sleep_us >> 10)
+ > level->time_overhead_us) {
pwr = level->steady_state_power;
} else {
pwr = level->steady_state_power;
pwr -= (level->time_overhead_us *
- level->steady_state_power)/sleep_us;
- pwr += level->energy_overhead / sleep_us;
+ level->steady_state_power) /
+ time_param->sleep_us;
+ pwr += level->energy_overhead / time_param->sleep_us;
}
if (!best_level || best_level->rs_limits.power[cpu] >= pwr) {
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index dbb23d5..60ee8f0 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -49,6 +49,7 @@
#include "spm.h"
#include "timer.h"
#include "pm-boot.h"
+#include <mach/event_timer.h>
/******************************************************************************
* Debug Definitions
@@ -112,6 +113,7 @@
"standalone_power_collapse",
};
+static struct hrtimer pm_hrtimer;
static struct msm_pm_sleep_ops pm_sleep_ops;
/*
* Write out the attribute.
@@ -645,6 +647,28 @@
return time;
}
+/**
+ * pm_hrtimer_cb() : Callback function for hrtimer created if the
+ * core needs to be awake to handle an event.
+ * @hrtimer : Pointer to hrtimer
+ */
+static enum hrtimer_restart pm_hrtimer_cb(struct hrtimer *hrtimer)
+{
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * msm_pm_set_timer() : Set an hrtimer to wake up the core in time
+ * to handle an event.
+ */
+static void msm_pm_set_timer(uint32_t modified_time_us)
+{
+ u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
+ ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
+ pm_hrtimer.function = pm_hrtimer_cb;
+ hrtimer_start(&pm_hrtimer, modified_ktime, HRTIMER_MODE_ABS);
+}
+
/******************************************************************************
* External Idle/Suspend Functions
*****************************************************************************/
@@ -657,15 +681,25 @@
int msm_pm_idle_prepare(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- uint32_t latency_us;
- uint32_t sleep_us;
int i;
unsigned int power_usage = -1;
int ret = 0;
+ uint32_t modified_time_us = 0;
+ struct msm_pm_time_params time_param;
- latency_us = (uint32_t) pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
- sleep_us = (uint32_t) ktime_to_ns(tick_nohz_get_sleep_length());
- sleep_us = DIV_ROUND_UP(sleep_us, 1000);
+ time_param.latency_us =
+ (uint32_t) pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
+ time_param.sleep_us =
+ (uint32_t) (ktime_to_us(tick_nohz_get_sleep_length())
+ & UINT_MAX);
+ time_param.modified_time_us = 0;
+
+ if (!dev->cpu)
+ time_param.next_event_us =
+ (uint32_t) (ktime_to_us(get_next_event_time())
+ & UINT_MAX);
+ else
+ time_param.next_event_us = 0;
for (i = 0; i < dev->state_count; i++) {
struct cpuidle_state *state = &drv->states[i];
@@ -702,17 +736,18 @@
case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
if (!allow)
break;
+ /* fall through */
if (pm_sleep_ops.lowest_limits)
rs_limits = pm_sleep_ops.lowest_limits(true,
- mode, latency_us, sleep_us,
- &power);
+ mode, &time_param, &power);
if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
pr_info("CPU%u: %s: %s, latency %uus, "
"sleep %uus, limit %p\n",
dev->cpu, __func__, state->desc,
- latency_us, sleep_us, rs_limits);
+ time_param.latency_us,
+ time_param.sleep_us, rs_limits);
if (!rs_limits)
allow = false;
@@ -730,6 +765,7 @@
if (allow) {
if (power < power_usage) {
power_usage = power;
+ modified_time_us = time_param.modified_time_us;
ret = mode;
}
@@ -738,6 +774,8 @@
}
}
+ if (modified_time_us && !dev->cpu)
+ msm_pm_set_timer(modified_time_us);
return ret;
}
@@ -850,6 +888,11 @@
int i;
int64_t period = 0;
int64_t time = msm_pm_timer_enter_suspend(&period);
+ struct msm_pm_time_params time_param;
+
+ time_param.latency_us = -1;
+ time_param.sleep_us = -1;
+ time_param.next_event_us = 0;
if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
pr_info("%s\n", __func__);
@@ -887,8 +930,7 @@
#endif /* CONFIG_MSM_SLEEP_TIME_OVERRIDE */
if (pm_sleep_ops.lowest_limits)
rs_limits = pm_sleep_ops.lowest_limits(false,
- MSM_PM_SLEEP_MODE_POWER_COLLAPSE, -1,
- -1, &power);
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE, &time_param, &power);
if (rs_limits) {
if (pm_sleep_ops.enter_sleep)
@@ -1046,6 +1088,7 @@
suspend_set_ops(&msm_pm_ops);
msm_pm_qtimer_available();
+ hrtimer_init(&pm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
msm_cpuidle_init();
platform_driver_register(&msm_pc_counter_driver);
diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h
index e2553e2..51256ca 100644
--- a/arch/arm/mach-msm/pm.h
+++ b/arch/arm/mach-msm/pm.h
@@ -57,6 +57,13 @@
#define MSM_PM_MODE(cpu, mode_nr) ((cpu) * MSM_PM_SLEEP_MODE_NR + (mode_nr))
+struct msm_pm_time_params {
+ uint32_t latency_us;
+ uint32_t sleep_us;
+ uint32_t next_event_us;
+ uint32_t modified_time_us;
+};
+
struct msm_pm_platform_data {
u8 idle_supported; /* Allow device to enter mode during idle */
u8 suspend_supported; /* Allow device to enter mode during suspend */
@@ -72,8 +79,8 @@
struct msm_pm_sleep_ops {
void *(*lowest_limits)(bool from_idle,
- enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
- uint32_t sleep_us, uint32_t *power);
+ enum msm_pm_sleep_mode sleep_mode,
+ struct msm_pm_time_params *time_param, uint32_t *power);
int (*enter_sleep)(uint32_t sclk_count, void *limits,
bool from_idle, bool notify_rpm);
void (*exit_sleep)(void *limits, bool from_idle,
diff --git a/arch/arm/mach-msm/rpm_resources.c b/arch/arm/mach-msm/rpm_resources.c
index 9d794e7..2a835f7 100644
--- a/arch/arm/mach-msm/rpm_resources.c
+++ b/arch/arm/mach-msm/rpm_resources.c
@@ -20,6 +20,7 @@
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
+#include <linux/hrtimer.h>
#include <mach/rpm.h>
#include <mach/msm_iomap.h>
#include <asm/mach-types.h>
@@ -37,6 +38,7 @@
enum {
MSM_RPMRS_DEBUG_OUTPUT = BIT(0),
MSM_RPMRS_DEBUG_BUFFER = BIT(1),
+ MSM_RPMRS_DEBUG_EVENT_TIMER = BIT(2),
};
static int msm_rpmrs_debug_mask;
@@ -891,8 +893,8 @@
}
static void *msm_rpmrs_lowest_limits(bool from_idle,
- enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
- uint32_t sleep_us, uint32_t *power)
+ enum msm_pm_sleep_mode sleep_mode,
+ struct msm_pm_time_params *time_param, uint32_t *power)
{
unsigned int cpu = smp_processor_id();
struct msm_rpmrs_level *best_level = NULL;
@@ -900,6 +902,8 @@
bool gpio_detectable = false;
int i;
uint32_t pwr;
+ uint32_t next_wakeup_us = time_param->sleep_us;
+ bool modify_event_timer;
if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
irqs_detectable = msm_mpm_irqs_detectable(from_idle);
@@ -909,16 +913,32 @@
for (i = 0; i < msm_rpmrs_level_count; i++) {
struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];
+ modify_event_timer = false;
+
if (!level->available)
continue;
if (sleep_mode != level->sleep_mode)
continue;
- if (latency_us < level->latency_us)
+ if (time_param->latency_us < level->latency_us)
continue;
- if (sleep_us <= level->time_overhead_us)
+ if (time_param->next_event_us &&
+ time_param->next_event_us < level->latency_us)
+ continue;
+
+ if (time_param->next_event_us) {
+ if ((time_param->next_event_us < time_param->sleep_us)
+ || ((time_param->next_event_us - level->latency_us) <
+ time_param->sleep_us)) {
+ modify_event_timer = true;
+ next_wakeup_us = time_param->next_event_us -
+ level->latency_us;
+ }
+ }
+
+ if (next_wakeup_us <= level->time_overhead_us)
continue;
if (!msm_rpmrs_irqs_detectable(&level->rs_limits,
@@ -929,18 +949,17 @@
if (!cpu && msm_rpm_local_request_is_outstanding())
break;
-
- if (sleep_us <= 1) {
+ if (next_wakeup_us <= 1) {
pwr = level->energy_overhead;
- } else if (sleep_us <= level->time_overhead_us) {
- pwr = level->energy_overhead / sleep_us;
- } else if ((sleep_us >> 10) > level->time_overhead_us) {
+ } else if (next_wakeup_us <= level->time_overhead_us) {
+ pwr = level->energy_overhead / next_wakeup_us;
+ } else if ((next_wakeup_us >> 10) > level->time_overhead_us) {
pwr = level->steady_state_power;
} else {
pwr = level->steady_state_power;
pwr -= (level->time_overhead_us *
- level->steady_state_power)/sleep_us;
- pwr += level->energy_overhead / sleep_us;
+ level->steady_state_power)/next_wakeup_us;
+ pwr += level->energy_overhead / next_wakeup_us;
}
if (!best_level ||
@@ -950,6 +969,12 @@
best_level = level;
if (power)
*power = pwr;
+ if (modify_event_timer && best_level->latency_us > 1)
+ time_param->modified_time_us =
+ time_param->next_event_us -
+ best_level->latency_us;
+ else
+ time_param->modified_time_us = 0;
}
}
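
For reference, below is a minimal standalone sketch of the cost model
that msm_rpmrs_lowest_limits() applies once a hard wakeup event is
known. It is not part of the patch: the example_level struct, the
example_level_power() helper and the numbers in main() are illustrative
stand-ins for the kernel's msm_rpmrs_level data, kept only to make the
arithmetic above easy to sanity-check in userspace.

#include <stdint.h>
#include <stdio.h>

struct example_level {
	uint32_t latency_us;          /* entry/exit latency of the level   */
	uint32_t time_overhead_us;    /* break-even residency              */
	uint32_t energy_overhead;     /* one-time entry/exit energy cost   */
	uint32_t steady_state_power;  /* power while resident in the level */
};

/*
 * Average power of using @level for an expected sleep of @sleep_us,
 * with the effective residency clamped to the next hard event. If the
 * event forces an earlier wakeup, *modified_time_us is set to the time
 * (in us from now) at which a wakeup timer would have to be re-armed,
 * mirroring time_param->modified_time_us in the patch.
 */
static uint32_t example_level_power(const struct example_level *level,
				    uint32_t sleep_us, uint32_t next_event_us,
				    uint32_t *modified_time_us)
{
	uint32_t next_wakeup_us = sleep_us;
	uint32_t pwr;

	*modified_time_us = 0;

	/* A level whose latency exceeds the event deadline is unusable. */
	if (next_event_us && next_event_us < level->latency_us)
		return UINT32_MAX;

	/* A hard event inside the sleep window shortens the stay. */
	if (next_event_us &&
	    (next_event_us < sleep_us ||
	     next_event_us - level->latency_us < sleep_us)) {
		next_wakeup_us = next_event_us - level->latency_us;
		if (level->latency_us > 1)
			*modified_time_us = next_wakeup_us;
	}

	/*
	 * Same piecewise model as the kernel code above; the kernel
	 * additionally skips levels whose break-even residency
	 * (time_overhead_us) is not met.
	 */
	if (next_wakeup_us <= 1)
		pwr = level->energy_overhead;
	else if (next_wakeup_us <= level->time_overhead_us)
		pwr = level->energy_overhead / next_wakeup_us;
	else if ((next_wakeup_us >> 10) > level->time_overhead_us)
		pwr = level->steady_state_power;
	else
		pwr = level->steady_state_power
			- (level->time_overhead_us *
			   level->steady_state_power) / next_wakeup_us
			+ level->energy_overhead / next_wakeup_us;

	return pwr;
}

int main(void)
{
	/* Illustrative numbers only. */
	struct example_level pc = {
		.latency_us = 300, .time_overhead_us = 500,
		.energy_overhead = 76000, .steady_state_power = 5,
	};
	uint32_t modified_us;
	uint32_t pwr = example_level_power(&pc, 10000, 4000, &modified_us);

	printf("avg power %u, re-arm wakeup timer at %u us\n",
	       (unsigned)pwr, (unsigned)modified_us);
	return 0;
}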