Merge changes I4d5d73ab,I6e076344,I6cf6035d,I29faa348 into msm-3.0

* changes:
  Perf: Use CPU PM notifiers to save and restore PMU regs
  ARM: cpu_pm: Add cpu power management notifiers
  ARM: Perfevents: Add mode exclusion support for Krait P2
  Perf: Fix detection of Krait implementation events
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index be324ac..b59ad93 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -26,6 +26,7 @@
 #include <asm/irq_regs.h>
 #include <asm/pmu.h>
 #include <asm/stacktrace.h>
+#include <linux/cpu_pm.h>
 
 static struct platform_device *pmu_device;
 
@@ -82,6 +83,8 @@
 					 struct hw_perf_event *hwc);
 	u32		(*read_counter)(int idx);
 	void		(*write_counter)(int idx, u32 val);
+	int		(*set_event_filter)(struct hw_perf_event *evt,
+					    struct perf_event_attr *attr);
 	void		(*start)(void);
 	void		(*stop)(void);
 	void		(*reset)(void *);
@@ -472,6 +475,13 @@
 	pmu_device = NULL;
 }
 
+static int
+event_requires_mode_exclusion(struct perf_event_attr *attr)
+{
+	return attr->exclude_idle || attr->exclude_user ||
+		attr->exclude_kernel || attr->exclude_hv;
+}
+
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmu_reserve_mutex);
 
@@ -508,17 +518,6 @@
 		return mapping;
 	}
 
-	/*
-	 * Check whether we need to exclude the counter from certain modes.
-	 * The ARM performance counters are on all of the time so if someone
-	 * has asked us for some excludes then we have to fail.
-	 */
-	if (event->attr.exclude_kernel || event->attr.exclude_user ||
-	    event->attr.exclude_hv || event->attr.exclude_idle) {
-		pr_debug("ARM performance counters do not support "
-			 "mode exclusion\n");
-		return -EPERM;
-	}
 
 	/*
 	 * We don't assign an index until we actually place the event onto
@@ -534,13 +533,23 @@
 	 * the event mapping and the counter to use. The counter to use is
 	 * also the index and the config_base is the event type.
 	 */
-	hwc->config_base	    = (unsigned long)mapping;
-	hwc->config		    = 0;
-	hwc->event_base		    = 0;
+	hwc->config_base = 0;
+	hwc->config = 0;
+	hwc->event_base = 0;
+
+	if ((!armpmu->set_event_filter ||
+		armpmu->set_event_filter(hwc, &event->attr)) &&
+		event_requires_mode_exclusion(&event->attr)) {
+		pr_debug("ARM performance counters do not support "
+				"mode exclusion\n");
+		return -EPERM;
+	}
+
+	hwc->config_base |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
 		hwc->sample_period  = armpmu->max_period;
-		hwc->last_period    = hwc->sample_period;
+		hwc->last_period = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
 
@@ -643,6 +652,30 @@
 #include "perf_event_msm_krait.c"
 #include "perf_event_msm_krait_l2.c"
 
+static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
+		void *v)
+{
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		perf_pmu_disable(&pmu);
+		break;
+
+	case CPU_PM_ENTER_FAILED:
+	case CPU_PM_EXIT:
+		if (armpmu && armpmu->reset)
+			armpmu->reset(NULL);
+		perf_pmu_enable(&pmu);
+
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block perf_cpu_pm_notifier_block = {
+	.notifier_call = perf_cpu_pm_notifier,
+};
+
 /*
  * Ensure the PMU has sane values out of reset.
  * This requires SMP to be available, so exists as a separate initcall.
@@ -725,6 +758,8 @@
 
 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 
+	cpu_pm_register_notifier(&perf_cpu_pm_notifier_block);
+
 	return 0;
 }
 early_initcall(init_hw_perf_events);
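
The combined filter check added to the init path above packs three cases into
one condition. A sketch of the equivalent logic, pulled out into a helper for
readability (check_event_filter is hypothetical, not part of the patch):

	static int check_event_filter(struct arm_pmu *pmu,
				      struct hw_perf_event *hwc,
				      struct perf_event_attr *attr)
	{
		/* No per-PMU filter hook: any requested exclusion must
		 * fail, exactly as before this series. */
		if (!pmu->set_event_filter)
			return event_requires_mode_exclusion(attr) ? -EPERM : 0;

		/* Hook present but it rejected the filter: fail only if
		 * exclusion was actually requested. */
		if (pmu->set_event_filter(hwc, attr))
			return event_requires_mode_exclusion(attr) ? -EPERM : 0;

		/* Hook accepted: the exclusion bits are already in
		 * hwc->config_base, and the event number is OR'd in
		 * afterwards via hwc->config_base |= mapping. */
		return 0;
	}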
diff --git a/arch/arm/kernel/perf_event_msm.c b/arch/arm/kernel/perf_event_msm.c
index 1c2206f..b40a226 100644
--- a/arch/arm/kernel/perf_event_msm.c
+++ b/arch/arm/kernel/perf_event_msm.c
@@ -585,6 +585,8 @@
 	 */
 	if (idx != ARMV7_CYCLE_COUNTER) {
 		val = hwc->config_base;
+		val &= ARMV7_EVTYPE_EVENT;
+
 		if (val > 0x40) {
 			event = get_scorpion_evtinfo(val, &evtinfo);
 			if (event == -EINVAL)
@@ -624,6 +626,8 @@
 	 */
 	if (idx != ARMV7_CYCLE_COUNTER) {
 		val = hwc->config_base;
+		val &= ARMV7_EVTYPE_EVENT;
+
 		if (val < 0x40) {
 			armv7_pmnc_write_evtsel(idx, hwc->config_base);
 		} else {
@@ -682,6 +686,26 @@
 }
 #endif
 
+static void scorpion_pmu_reset(void *info)
+{
+	u32 idx, nb_cnt = armpmu->num_events;
+
+	/* Stop all counters and their interrupts */
+	for (idx = 1; idx < nb_cnt; ++idx) {
+		armv7_pmnc_disable_counter(idx);
+		armv7_pmnc_disable_intens(idx);
+	}
+
+	/* Clear all pmresrs */
+	scorpion_clear_pmuregs();
+
+	/* Reset irq stat reg */
+	armv7_pmnc_getreset_flags();
+
+	/* Reset all ctrs to 0 */
+	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+}
+
 static struct arm_pmu scorpion_pmu = {
 	.handle_irq		= armv7pmu_handle_irq,
 #ifdef CONFIG_SMP
@@ -696,6 +720,7 @@
 	.get_event_idx		= armv7pmu_get_event_idx,
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
+	.reset			= scorpion_pmu_reset,
 	.max_period		= (1LLU << 32) - 1,
 };
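
The two ARMV7_EVTYPE_EVENT masks added above matter once hwc->config_base can
carry mode-exclusion bits in its upper bits alongside the event number, as a
filter hook like the Krait one below arranges. A worked example with assumed
numbers:

	/*
	 * An architectural event 0x01 with exclusion bits 31:30 set gives
	 *
	 *	hwc->config_base == 0xc0000001
	 *
	 * Unmasked, val compares as "> 0x40" and the event is wrongly
	 * routed to get_scorpion_evtinfo(); masked, val == 0x01 and it
	 * is written out as a plain ARMv7 event, as intended.
	 */
	val = hwc->config_base & ARMV7_EVTYPE_EVENT;	/* val == 0x01 */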
 
diff --git a/arch/arm/kernel/perf_event_msm_krait.c b/arch/arm/kernel/perf_event_msm_krait.c
index e3e9bf2..cf1f1ee 100644
--- a/arch/arm/kernel/perf_event_msm_krait.c
+++ b/arch/arm/kernel/perf_event_msm_krait.c
@@ -33,6 +33,9 @@
 #define KRAIT_P2_L1_ITLB_ACCESS 0x12222
 #define KRAIT_P2_L1_DTLB_ACCESS 0x12210
 
+#define KRAIT_EVENT_MASK 0xfffff
+#define KRAIT_MODE_EXCL_MASK 0xc0000000
+
 u32 evt_type_base[][4] = {
 	{0x4c, 0x50, 0x54},		/* Pass 1 */
 	{0xcc, 0xd0, 0xd4, 0xd8},	/* Pass 2 */
@@ -198,7 +201,7 @@
 	if ((group > 3) || (reg > krait_max_l1_reg))
 		return -EINVAL;
 
-	if (prefix != KRAIT_EVT_PREFIX || prefix != KRAIT_VENUMEVT_PREFIX)
+	if (prefix != KRAIT_EVT_PREFIX && prefix != KRAIT_VENUMEVT_PREFIX)
 		return -EINVAL;
 
 	if (prefix == KRAIT_VENUMEVT_PREFIX) {
@@ -391,6 +394,8 @@
 	 */
 	if (idx != ARMV7_CYCLE_COUNTER) {
 		val = hwc->config_base;
+		val &= KRAIT_EVENT_MASK;
+
 		if (val > 0x40) {
 			event = get_krait_evtinfo(val, &evtinfo);
 			if (event == -EINVAL)
@@ -430,6 +435,8 @@
 	 */
 	if (idx != ARMV7_CYCLE_COUNTER) {
 		val = hwc->config_base;
+		val &= KRAIT_EVENT_MASK;
+
 		if (val < 0x40) {
 			armv7_pmnc_write_evtsel(idx, hwc->config_base);
 		} else {
@@ -437,6 +444,10 @@
 
 			if (event == -EINVAL)
 				goto krait_out;
+
+			/* Restore Mode-exclusion bits */
+			event |= (hwc->config_base & KRAIT_MODE_EXCL_MASK);
+
 			/*
 			 * Set event (if destined for PMNx counters)
 			 * We don't need to set the event if it's a cycle count
@@ -461,6 +472,26 @@
 	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
+static void krait_pmu_reset(void *info)
+{
+	u32 idx, nb_cnt = armpmu->num_events;
+
+	/* Stop all counters and their interrupts */
+	for (idx = 1; idx < nb_cnt; ++idx) {
+		armv7_pmnc_disable_counter(idx);
+		armv7_pmnc_disable_intens(idx);
+	}
+
+	/* Clear all pmresrs */
+	krait_clear_pmuregs();
+
+	/* Reset irq stat reg */
+	armv7_pmnc_getreset_flags();
+
+	/* Reset all ctrs to 0 */
+	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+}
+
 static struct arm_pmu krait_pmu = {
 	.handle_irq		= armv7pmu_handle_irq,
 #ifdef CONFIG_SMP
@@ -475,6 +506,7 @@
 	.get_event_idx		= armv7pmu_get_event_idx,
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
+	.reset			= krait_pmu_reset,
 	.max_period		= (1LLU << 32) - 1,
 };
 
@@ -505,6 +537,9 @@
 	if (krait_ver > 0) {
 		evt_index = 1;
 		krait_max_l1_reg = 3;
+
+		krait_pmu.set_event_filter = armv7pmu_set_event_filter;
+
 		armv7_krait_perf_cache_map[C(ITLB)]
 			[C(OP_READ)]
 			[C(RESULT_ACCESS)] = KRAIT_P2_L1_ITLB_ACCESS;
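
The || to && change in the prefix check above is the "Fix detection of Krait
implementation events" patch in this series: no value of prefix can equal two
different constants at once, so the old test was true for every input and all
implementation events were rejected with -EINVAL. Assuming the two prefixes
have distinct values, say 1 and 2:

	/*
	 * prefix == 1 (KRAIT_EVT_PREFIX, assumed value):
	 *	old: (1 != 1) || (1 != 2)  -> true  -> -EINVAL (wrong)
	 *	new: (1 != 1) && (1 != 2)  -> false -> accepted
	 *
	 * prefix == 3 (matches neither prefix):
	 *	old: true || true  -> -EINVAL
	 *	new: true && true  -> -EINVAL (still rejected, as intended)
	 */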
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4031c7b..277f1ce 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -715,7 +715,8 @@
 /*
  * EVTSEL: Event selection reg
  */
-#define	ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */
+#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
+#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
 
 /*
  * SELECT: Counter selection reg
@@ -730,6 +731,39 @@
 #define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 #define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 
+/*
+ * Event filters for PMUv2
+ */
+#define	ARMV7_EXCLUDE_PL1	(1 << 31)
+#define	ARMV7_EXCLUDE_USER	(1 << 30)
+#define	ARMV7_INCLUDE_HYP	(1 << 27)
+
+/*
+ * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ */
+int armv7pmu_set_event_filter(struct hw_perf_event *event,
+		struct perf_event_attr *attr)
+{
+	unsigned long config_base = 0;
+
+	if (attr->exclude_idle)
+		return -EPERM;
+	if (attr->exclude_user)
+		config_base |= ARMV7_EXCLUDE_USER;
+	if (attr->exclude_kernel)
+		config_base |= ARMV7_EXCLUDE_PL1;
+	if (!attr->exclude_hv)
+		config_base |= ARMV7_INCLUDE_HYP;
+
+	/*
+	 * Install the filter into config_base as this is used to
+	 * construct the event type.
+	 */
+	event->config_base = config_base;
+
+	return 0;
+}
+
 static inline unsigned long armv7_pmnc_read(void)
 {
 	u32 val;
@@ -815,7 +849,8 @@
 static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
 {
 	if (armv7_pmnc_select_counter(idx) == idx) {
-		val &= ARMV7_EVTSEL_MASK;
+		val &= ARMV7_EVTYPE_MASK;
+
 		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 	}
 }
@@ -976,7 +1011,7 @@
 	 * Set event (if destined for PMNx counters)
 	 * We don't need to set the event if it's a cycle count
 	 */
-	if (idx != ARMV7_CYCLE_COUNTER)
+	if (armpmu->set_event_filter || idx != ARMV7_CYCLE_COUNTER)
 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 
 	/*
@@ -1101,9 +1136,10 @@
 				  struct hw_perf_event *event)
 {
 	int idx;
+	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
 
 	/* Always place a cycle counter into the cycle counter. */
-	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
+	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
 		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
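
Putting armv7pmu_set_event_filter() and armv7_pmnc_write_evtsel() together,
the value written to EVTSEL is the OR of the filter bits and the event number,
trimmed by ARMV7_EVTYPE_MASK. A worked example with assumed attribute settings:

	/*
	 * Count event 0x11 in user space only, i.e.
	 * attr.exclude_kernel = 1 and attr.exclude_hv = 1:
	 *
	 *	config_base  = ARMV7_EXCLUDE_PL1	 = 0x80000000
	 *	config_base |= 0x11 (event number)	 = 0x80000011
	 *	evtsel = config_base & ARMV7_EVTYPE_MASK = 0x80000011
	 *
	 * The filter lives in bits 31:30 (plus bit 27 for the hyp
	 * include) and the event number in bits 7:0, so the two share
	 * config_base without colliding.
	 */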
 
diff --git a/arch/arm/mach-msm/cpuidle.c b/arch/arm/mach-msm/cpuidle.c
index 4f22feb..6738955 100644
--- a/arch/arm/mach-msm/cpuidle.c
+++ b/arch/arm/mach-msm/cpuidle.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
 
 #include "cpuidle.h"
 #include "pm.h"
@@ -61,8 +62,15 @@
 	atomic_notifier_call_chain(head, MSM_CPUIDLE_STATE_ENTER, NULL);
 #endif
 
+#ifdef CONFIG_CPU_PM
+	cpu_pm_enter();
+#endif
 	ret = msm_pm_idle_enter((enum msm_pm_sleep_mode) (state->driver_data));
 
+#ifdef CONFIG_CPU_PM
+	cpu_pm_exit();
+#endif
+
 #ifdef CONFIG_MSM_SLEEP_STATS
 	atomic_notifier_call_chain(head, MSM_CPUIDLE_STATE_EXIT, NULL);
 #endif
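
One caveat in the hunk above: cpu_pm_enter() can return an error when a
notifier refuses the transition, and that return value is ignored here. A
sketch of what honouring it could look like (the cpu_do_idle() fallback is an
assumption, not part of this patch):

	if (cpu_pm_enter()) {
		/* A notifier vetoed the low power state; fall back to a
		 * shallow wait instead of powering down. */
		cpu_do_idle();
	} else {
		ret = msm_pm_idle_enter((enum msm_pm_sleep_mode)
						(state->driver_data));
		cpu_pm_exit();
	}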
diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h
new file mode 100644
index 0000000..a165fd7
--- /dev/null
+++ b/include/linux/cpu_pm.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *	Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_CPU_PM_H
+#define _LINUX_CPU_PM_H
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+/*
+ * When a CPU goes to a low power state that turns off power to the CPU's
+ * power domain, the contents of some blocks (floating point coprocessors,
+ * interrupt controllers, caches, timers) in the same power domain can
+ * be lost.  The cpu_pm notifiers provide a method for platform idle, suspend,
+ * and hotplug implementations to notify the drivers for these blocks that
+ * they may be reset.
+ *
+ * All cpu_pm notifications must be called with interrupts disabled.
+ *
+ * The notifications are split into two classes, CPU notifications and CPU
+ * cluster notifications.
+ *
+ * CPU notifications apply to a single CPU, and must be called on the affected
+ * CPU.  They are used to save per-cpu context for affected blocks.
+ *
+ * CPU cluster notifications apply to all CPUs in a single power domain. They
+ * are used to save any global context for affected blocks, and must be called
+ * after all the CPUs in the power domain have been notified of the low power
+ * state.
+ *
+ */
+
+/*
+ * Event codes passed as unsigned long val to notifier calls
+ */
+enum cpu_pm_event {
+	/* A single cpu is entering a low power state */
+	CPU_PM_ENTER,
+
+	/* A single cpu failed to enter a low power state */
+	CPU_PM_ENTER_FAILED,
+
+	/* A single cpu is exiting a low power state */
+	CPU_PM_EXIT,
+
+	/* A cpu power domain is entering a low power state */
+	CPU_CLUSTER_PM_ENTER,
+
+	/* A cpu power domain failed to enter a low power state */
+	CPU_CLUSTER_PM_ENTER_FAILED,
+
+	/* A cpu power domain is exiting a low power state */
+	CPU_CLUSTER_PM_EXIT,
+};
+
+int cpu_pm_register_notifier(struct notifier_block *nb);
+int cpu_pm_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * cpu_pm_enter
+ *
+ * Notifies listeners that a single cpu is entering a low power state that may
+ * cause some blocks in the same power domain as the cpu to reset.
+ *
+ * Must be called on the affected cpu with interrupts disabled.  Platform is
+ * responsible for ensuring that cpu_pm_enter is not called twice on the same
+ * cpu before cpu_pm_exit is called.
+ */
+int cpu_pm_enter(void);
+
+/*
+ * cpu_pm_exit
+ *
+ * Notifies listeners that a single cpu is exiting a low power state that may
+ * have caused some blocks in the same power domain as the cpu to reset.
+ *
+ * Must be called on the affected cpu with interrupts disabled.
+ */
+int cpu_pm_exit(void);
+
+/*
+ * cpu_cluster_pm_enter
+ *
+ * Notifies listeners that all cpus in a power domain are entering a low power
+ * state that may cause some blocks in the same power domain to reset.
+ *
+ * Must be called after cpu_pm_enter has been called on all cpus in the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain.
+ *
+ * Must be called with interrupts disabled.
+ */
+int cpu_cluster_pm_enter(void);
+
+/*
+ * cpu_cluster_pm_exit
+ *
+ * Notifies listeners that all cpus in a power domain are exiting a low power
+ * state that may have caused some blocks in the same power domain to reset.
+ *
+ * Must be called after cpu_cluster_pm_enter has been called for the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain.
+ *
+ * Must be called with interrupts disabled.
+ */
+int cpu_cluster_pm_exit(void);
+
+#endif
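
To make the header's contract concrete, a minimal sketch of a client driver
that saves and restores per-cpu state across a power-down (the vfp_save_state()
and vfp_restore_state() helpers are hypothetical):

	#include <linux/cpu_pm.h>
	#include <linux/init.h>
	#include <linux/notifier.h>

	/* Hypothetical helpers for a block that loses context: */
	extern void vfp_save_state(void);
	extern void vfp_restore_state(void);

	static int vfp_cpu_pm_notifier(struct notifier_block *self,
				       unsigned long cmd, void *v)
	{
		switch (cmd) {
		case CPU_PM_ENTER:
			vfp_save_state();	/* context may be lost */
			break;
		case CPU_PM_ENTER_FAILED:	/* power never went away */
		case CPU_PM_EXIT:		/* or it is back now */
			vfp_restore_state();
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block vfp_cpu_pm_nb = {
		.notifier_call = vfp_cpu_pm_notifier,
	};

	static int __init vfp_pm_init(void)
	{
		return cpu_pm_register_notifier(&vfp_cpu_pm_nb);
	}
	arch_initcall(vfp_pm_init);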
diff --git a/kernel/Makefile b/kernel/Makefile
index d06467f..63bf7a5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -101,6 +101,7 @@
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
+obj-$(CONFIG_CPU_PM) += cpu_pm.o
 
 obj-$(CONFIG_PERF_EVENTS) += events/
 
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
new file mode 100644
index 0000000..1cd23b8
--- /dev/null
+++ b/kernel/cpu_pm.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *	Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu_pm.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+
+static DEFINE_RWLOCK(cpu_pm_notifier_lock);
+static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+
+int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+	unsigned long flags;
+	int ret;
+
+	write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+	ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+	write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
+
+int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+	unsigned long flags;
+	int ret;
+
+	write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+	ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+	write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
+
+static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+{
+	int ret;
+
+	ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+		nr_to_call, nr_calls);
+
+	return notifier_to_errno(ret);
+}
+
+int cpu_pm_enter(void)
+{
+	int nr_calls;
+	int ret = 0;
+
+	read_lock(&cpu_pm_notifier_lock);
+	ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+	if (ret)
+		cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+	read_unlock(&cpu_pm_notifier_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_enter);
+
+int cpu_pm_exit(void)
+{
+	int ret;
+
+	read_lock(&cpu_pm_notifier_lock);
+	ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+	read_unlock(&cpu_pm_notifier_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_exit);
+
+int cpu_cluster_pm_enter(void)
+{
+	int nr_calls;
+	int ret = 0;
+
+	read_lock(&cpu_pm_notifier_lock);
+	ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+	if (ret)
+		cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+	read_unlock(&cpu_pm_notifier_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
+
+int cpu_cluster_pm_exit(void)
+{
+	int ret;
+
+	read_lock(&cpu_pm_notifier_lock);
+	ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+	read_unlock(&cpu_pm_notifier_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
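
For reference, the calling sequence the comments above imply for a platform's
idle path, run on each cpu with interrupts disabled (last_cpu_in_power_domain
and platform_power_down() are placeholders):

	cpu_pm_enter();

	/* Only the last cpu going down notifies the whole domain: */
	if (last_cpu_in_power_domain)
		cpu_cluster_pm_enter();

	platform_power_down();

	/* On wake, in reverse order: */
	if (last_cpu_in_power_domain)
		cpu_cluster_pm_exit();

	cpu_pm_exit();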
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index b90fb99..ac4641e 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -24,6 +24,10 @@
 config HAS_EARLYSUSPEND
 	bool
 
+config CPU_PM
+	def_bool y
+	depends on SUSPEND || CPU_IDLE
+
 config WAKELOCK
 	bool "Wake lock"
 	depends on PM && RTC_CLASS