Merge branch 'pm-domains' into pm-for-linus

* pm-domains:
  PM / Domains: Split device PM domain data into base and need_restore
  ARM: mach-shmobile: sh7372 sleep warning fixes
  ARM: mach-shmobile: sh7372 A3SM support
  ARM: mach-shmobile: sh7372 generic suspend/resume support
  PM / Domains: Preliminary support for devices with power.irq_safe set
  PM: Move clock-related definitions and headers to separate file
  PM / Domains: Use power.subsys_data to reduce overhead
  PM: Reference counting of power.subsys_data
  PM: Introduce struct pm_subsys_data
  ARM / shmobile: Make A3RV be a subdomain of A4LC on SH7372
  PM / Domains: Rename argument of pm_genpd_add_subdomain()
  PM / Domains: Rename GPD_STATE_WAIT_PARENT to GPD_STATE_WAIT_MASTER
  PM / Domains: Allow generic PM domains to have multiple masters
  PM / Domains: Add "wait for parent" status for generic PM domains
  PM / Domains: Make pm_genpd_poweron() always survive parent removal
  PM / Domains: Do not take parent locks to modify subdomain counters
  PM / Domains: Implement subdomain counters as atomic fields
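
For reference, a minimal sketch of how a platform would wire up the reworked
genpd/pm_clk API after this series (modelled on the sh7372 changes below).
The domain, device and function names (my_a4_pd, my_a3_pd, my_pd_setup,
my_pd_init, my_pdev) are hypothetical; only the pm_genpd_*/pm_clk_* calls
come from the patches themselves:

    #include <linux/init.h>
    #include <linux/platform_device.h>
    #include <linux/pm_domain.h>
    #include <linux/pm_clock.h>

    /* Two hypothetical power domains; my_a3_pd is nested inside my_a4_pd. */
    static struct generic_pm_domain my_a4_pd;
    static struct generic_pm_domain my_a3_pd;

    static int my_pd_power_on(struct generic_pm_domain *genpd)
    {
    	/* poke the platform's power controller here */
    	return 0;
    }

    static int my_pd_power_off(struct generic_pm_domain *genpd)
    {
    	return 0;
    }

    static void __init my_pd_setup(struct generic_pm_domain *genpd)
    {
    	pm_genpd_init(genpd, NULL, false);	/* no governor, start "on" */
    	genpd->power_on = my_pd_power_on;
    	genpd->power_off = my_pd_power_off;
    	genpd->stop_device = pm_clk_suspend;	/* gate clocks on runtime suspend */
    	genpd->start_device = pm_clk_resume;
    	genpd->dev_irq_safe = true;		/* clock ops may run with IRQs off */
    }

    static void __init my_pd_init(struct platform_device *my_pdev)
    {
    	my_pd_setup(&my_a4_pd);
    	my_pd_setup(&my_a3_pd);

    	/* my_a3_pd is powered through my_a4_pd (master/slave link) */
    	pm_genpd_add_subdomain(&my_a4_pd, &my_a3_pd);

    	/* attaching a device also allocates/refs power.subsys_data */
    	pm_genpd_add_device(&my_a3_pd, &my_pdev->dev);
    	if (pm_clk_no_clocks(&my_pdev->dev))
    		pm_clk_add(&my_pdev->dev, NULL);	/* default clock */
    }
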
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 943072d..7868e75 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
 #include <linux/clk.h>
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 523f608..d6c8ae8 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -42,6 +42,7 @@
 #include <linux/leds.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/usb/r8a66597.h>
+#include <linux/pm_clock.h>
 
 #include <media/sh_mobile_ceu.h>
 #include <media/sh_mobile_csi2.h>
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 17c19dc..19f5d49 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -39,7 +39,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/smsc911x.h>
 #include <linux/sh_intc.h>
 #include <linux/tca6416_keypad.h>
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 06aecb3..c0cdbf9 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -35,8 +35,8 @@
 extern void sh7372_clock_init(void);
 extern void sh7372_pinmux_init(void);
 extern void sh7372_pm_init(void);
-extern void sh7372_cpu_suspend(void);
-extern void sh7372_cpu_resume(void);
+extern void sh7372_resume_core_standby_a3sm(void);
+extern int sh7372_do_idle_a3sm(unsigned long unused);
 extern struct clk sh7372_extal1_clk;
 extern struct clk sh7372_extal2_clk;
 
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 24e63a8..efc984c 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -498,9 +498,12 @@
 extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd);
 extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
 					struct platform_device *pdev);
+extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
+				    struct sh7372_pm_domain *sh7372_sd);
 #else
 #define sh7372_init_pm_domain(pd) do { } while(0)
 #define sh7372_add_device_to_domain(pd, pdev) do { } while(0)
+#define sh7372_pm_add_subdomain(pd, sd) do { } while(0)
 #endif /* CONFIG_PM */
 
 #endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 933fb41..8e0944f 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -15,23 +15,60 @@
 #include <linux/list.h>
 #include <linux/err.h>
 #include <linux/slab.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/bitrev.h>
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
+#include <asm/suspend.h>
 #include <mach/common.h>
 #include <mach/sh7372.h>
 
-#define SMFRAM 0xe6a70000
-#define SYSTBCR 0xe6150024
-#define SBAR 0xe6180020
-#define APARMBAREA 0xe6f10020
+/* DBG */
+#define DBGREG1 0xe6100020
+#define DBGREG9 0xe6100040
 
+/* CPGA */
+#define SYSTBCR 0xe6150024
+#define MSTPSR0 0xe6150030
+#define MSTPSR1 0xe6150038
+#define MSTPSR2 0xe6150040
+#define MSTPSR3 0xe6150048
+#define MSTPSR4 0xe615004c
+#define PLLC01STPCR 0xe61500c8
+
+/* SYSC */
 #define SPDCR 0xe6180008
 #define SWUCR 0xe6180014
+#define SBAR 0xe6180020
+#define WUPSMSK 0xe618002c
+#define WUPSMSK2 0xe6180048
 #define PSTR 0xe6180080
+#define WUPSFAC 0xe6180098
+#define IRQCR 0xe618022c
+#define IRQCR2 0xe6180238
+#define IRQCR3 0xe6180244
+#define IRQCR4 0xe6180248
+#define PDNSEL 0xe6180254
+
+/* INTC */
+#define ICR1A 0xe6900000
+#define ICR2A 0xe6900004
+#define ICR3A 0xe6900008
+#define ICR4A 0xe690000c
+#define INTMSK00A 0xe6900040
+#define INTMSK10A 0xe6900044
+#define INTMSK20A 0xe6900048
+#define INTMSK30A 0xe690004c
+
+/* MFIS */
+#define SMFRAM 0xe6a70000
+
+/* AP-System Core */
+#define APARMBAREA 0xe6f10020
 
 #define PSTR_RETRIES 100
 #define PSTR_DELAY_US 10
@@ -91,35 +128,6 @@
 	return ret;
 }
 
-static int pd_power_up_a3rv(struct generic_pm_domain *genpd)
-{
-	int ret = pd_power_up(genpd);
-
-	/* force A4LC on after A3RV has been requested on */
-	pm_genpd_poweron(&sh7372_a4lc.genpd);
-
-	return ret;
-}
-
-static int pd_power_down_a3rv(struct generic_pm_domain *genpd)
-{
-	int ret = pd_power_down(genpd);
-
-	/* try to power down A4LC after A3RV is requested off */
-	genpd_queue_power_off_work(&sh7372_a4lc.genpd);
-
-	return ret;
-}
-
-static int pd_power_down_a4lc(struct generic_pm_domain *genpd)
-{
-	/* only power down A4LC if A3RV is off */
-	if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift)))
-		return pd_power_down(genpd);
-
-	return -EBUSY;
-}
-
 static bool pd_active_wakeup(struct device *dev)
 {
 	return true;
@@ -132,18 +140,10 @@
 	pm_genpd_init(genpd, NULL, false);
 	genpd->stop_device = pm_clk_suspend;
 	genpd->start_device = pm_clk_resume;
+	genpd->dev_irq_safe = true;
 	genpd->active_wakeup = pd_active_wakeup;
-
-	if (sh7372_pd == &sh7372_a4lc) {
-		genpd->power_off = pd_power_down_a4lc;
-		genpd->power_on = pd_power_up;
-	} else if (sh7372_pd == &sh7372_a3rv) {
-		genpd->power_off = pd_power_down_a3rv;
-		genpd->power_on = pd_power_up_a3rv;
-	} else {
-		genpd->power_off = pd_power_down;
-		genpd->power_on = pd_power_up;
-	}
+	genpd->power_off = pd_power_down;
+	genpd->power_on = pd_power_up;
 	genpd->power_on(&sh7372_pd->genpd);
 }
 
@@ -152,11 +152,15 @@
 {
 	struct device *dev = &pdev->dev;
 
-	if (!dev->power.subsys_data) {
-		pm_clk_init(dev);
-		pm_clk_add(dev, NULL);
-	}
 	pm_genpd_add_device(&sh7372_pd->genpd, dev);
+	if (pm_clk_no_clocks(dev))
+		pm_clk_add(dev, NULL);
+}
+
+void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
+			     struct sh7372_pm_domain *sh7372_sd)
+{
+	pm_genpd_add_subdomain(&sh7372_pd->genpd, &sh7372_sd->genpd);
 }
 
 struct sh7372_pm_domain sh7372_a4lc = {
@@ -185,33 +189,175 @@
 
 #endif /* CONFIG_PM */
 
-static void sh7372_enter_core_standby(void)
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
+static int sh7372_do_idle_core_standby(unsigned long unused)
 {
-	void __iomem *smfram = (void __iomem *)SMFRAM;
-
-	__raw_writel(0, APARMBAREA); /* translate 4k */
-	__raw_writel(__pa(sh7372_cpu_resume), SBAR); /* set reset vector */
-	__raw_writel(0x10, SYSTBCR); /* enable core standby */
-
-	__raw_writel(0, smfram + 0x3c); /* clear page table address */
-
-	sh7372_cpu_suspend();
-	cpu_init();
-
-	/* if page table address is non-NULL then we have been powered down */
-	if (__raw_readl(smfram + 0x3c)) {
-		__raw_writel(__raw_readl(smfram + 0x40),
-			     __va(__raw_readl(smfram + 0x3c)));
-
-		flush_tlb_all();
-		set_cr(__raw_readl(smfram + 0x38));
-	}
-
-	__raw_writel(0, SYSTBCR); /* disable core standby */
-	__raw_writel(0, SBAR); /* disable reset vector translation */
+	cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */
+	return 0;
 }
 
+static void sh7372_enter_core_standby(void)
+{
+	/* set reset vector, translate 4k */
+	__raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
+	__raw_writel(0, APARMBAREA);
+
+	/* enter sleep mode with SYSTBCR to 0x10 */
+	__raw_writel(0x10, SYSTBCR);
+	cpu_suspend(0, sh7372_do_idle_core_standby);
+	__raw_writel(0, SYSTBCR);
+
+	 /* disable reset vector translation */
+	__raw_writel(0, SBAR);
+}
+#endif
+
+#ifdef CONFIG_SUSPEND
+static void sh7372_enter_a3sm_common(int pllc0_on)
+{
+	/* set reset vector, translate 4k */
+	__raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
+	__raw_writel(0, APARMBAREA);
+
+	if (pllc0_on)
+		__raw_writel(0, PLLC01STPCR);
+	else
+		__raw_writel(1 << 28, PLLC01STPCR);
+
+	__raw_writel(0, PDNSEL); /* power-down A3SM only, not A4S */
+	__raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
+	cpu_suspend(0, sh7372_do_idle_a3sm);
+	__raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */
+
+	 /* disable reset vector translation */
+	__raw_writel(0, SBAR);
+}
+
+static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p)
+{
+	unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
+	unsigned long msk, msk2;
+
+	/* check active clocks to determine potential wakeup sources */
+
+	mstpsr0 = __raw_readl(MSTPSR0);
+	if ((mstpsr0 & 0x00000003) != 0x00000003) {
+		pr_debug("sh7372 mstpsr0 0x%08lx\n", mstpsr0);
+		return 0;
+	}
+
+	mstpsr1 = __raw_readl(MSTPSR1);
+	if ((mstpsr1 & 0xff079b7f) != 0xff079b7f) {
+		pr_debug("sh7372 mstpsr1 0x%08lx\n", mstpsr1);
+		return 0;
+	}
+
+	mstpsr2 = __raw_readl(MSTPSR2);
+	if ((mstpsr2 & 0x000741ff) != 0x000741ff) {
+		pr_debug("sh7372 mstpsr2 0x%08lx\n", mstpsr2);
+		return 0;
+	}
+
+	mstpsr3 = __raw_readl(MSTPSR3);
+	if ((mstpsr3 & 0x1a60f010) != 0x1a60f010) {
+		pr_debug("sh7372 mstpsr3 0x%08lx\n", mstpsr3);
+		return 0;
+	}
+
+	mstpsr4 = __raw_readl(MSTPSR4);
+	if ((mstpsr4 & 0x00008cf0) != 0x00008cf0) {
+		pr_debug("sh7372 mstpsr4 0x%08lx\n", mstpsr4);
+		return 0;
+	}
+
+	msk = 0;
+	msk2 = 0;
+
+	/* make bitmaps of limited number of wakeup sources */
+
+	if ((mstpsr2 & (1 << 23)) == 0) /* SPU2 */
+		msk |= 1 << 31;
+
+	if ((mstpsr2 & (1 << 12)) == 0) /* MFI_MFIM */
+		msk |= 1 << 21;
+
+	if ((mstpsr4 & (1 << 3)) == 0) /* KEYSC */
+		msk |= 1 << 2;
+
+	if ((mstpsr1 & (1 << 24)) == 0) /* CMT0 */
+		msk |= 1 << 1;
+
+	if ((mstpsr3 & (1 << 29)) == 0) /* CMT1 */
+		msk |= 1 << 1;
+
+	if ((mstpsr4 & (1 << 0)) == 0) /* CMT2 */
+		msk |= 1 << 1;
+
+	if ((mstpsr2 & (1 << 13)) == 0) /* MFI_MFIS */
+		msk2 |= 1 << 17;
+
+	*mskp = msk;
+	*msk2p = msk2;
+
+	return 1;
+}
+
+static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p)
+{
+	u16 tmp, irqcr1, irqcr2;
+	int k;
+
+	irqcr1 = 0;
+	irqcr2 = 0;
+
+	/* convert INTCA ICR register layout to SYSC IRQCR+IRQCR2 */
+	for (k = 0; k <= 7; k++) {
+		tmp = (icr >> ((7 - k) * 4)) & 0xf;
+		irqcr1 |= (tmp & 0x03) << (k * 2);
+		irqcr2 |= (tmp >> 2) << (k * 2);
+	}
+
+	*irqcr1p = irqcr1;
+	*irqcr2p = irqcr2;
+}
+
+static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
+{
+	u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
+	unsigned long tmp;
+
+	/* read IRQ0A -> IRQ15A mask */
+	tmp = bitrev8(__raw_readb(INTMSK00A));
+	tmp |= bitrev8(__raw_readb(INTMSK10A)) << 8;
+
+	/* setup WUPSMSK from clocks and external IRQ mask */
+	msk = (~msk & 0xc030000f) | (tmp << 4);
+	__raw_writel(msk, WUPSMSK);
+
+	/* propagate level/edge trigger for external IRQ 0->15 */
+	sh7372_icr_to_irqcr(__raw_readl(ICR1A), &irqcrx_low, &irqcry_low);
+	sh7372_icr_to_irqcr(__raw_readl(ICR2A), &irqcrx_high, &irqcry_high);
+	__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR);
+	__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR2);
+
+	/* read IRQ16A -> IRQ31A mask */
+	tmp = bitrev8(__raw_readb(INTMSK20A));
+	tmp |= bitrev8(__raw_readb(INTMSK30A)) << 8;
+
+	/* setup WUPSMSK2 from clocks and external IRQ mask */
+	msk2 = (~msk2 & 0x00030000) | tmp;
+	__raw_writel(msk2, WUPSMSK2);
+
+	/* propagate level/edge trigger for external IRQ 16->31 */
+	sh7372_icr_to_irqcr(__raw_readl(ICR3A), &irqcrx_low, &irqcry_low);
+	sh7372_icr_to_irqcr(__raw_readl(ICR4A), &irqcrx_high, &irqcry_high);
+	__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
+	__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
+}
+#endif
+
 #ifdef CONFIG_CPU_IDLE
+
 static void sh7372_cpuidle_setup(struct cpuidle_device *dev)
 {
 	struct cpuidle_state *state;
@@ -239,9 +385,25 @@
 #endif
 
 #ifdef CONFIG_SUSPEND
+
 static int sh7372_enter_suspend(suspend_state_t suspend_state)
 {
-	sh7372_enter_core_standby();
+	unsigned long msk, msk2;
+
+	/* check active clocks to determine potential wakeup sources */
+	if (sh7372_a3sm_valid(&msk, &msk2)) {
+
+		/* convert INTC mask and sense to SYSC mask and sense */
+		sh7372_setup_a3sm(msk, msk2);
+
+		/* enter A3SM sleep with PLLC0 off */
+		pr_debug("entering A3SM\n");
+		sh7372_enter_a3sm_common(0);
+	} else {
+		/* default to Core Standby that supports all wakeup sources */
+		pr_debug("entering Core Standby\n");
+		sh7372_enter_core_standby();
+	}
 	return 0;
 }
 
@@ -253,9 +415,6 @@
 static void sh7372_suspend_init(void) {}
 #endif
 
-#define DBGREG1 0xe6100020
-#define DBGREG9 0xe6100040
-
 void __init sh7372_pm_init(void)
 {
 	/* enable DBG hardware block to kick SYSC */
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
index 6ec454e..bd5c6a3 100644
--- a/arch/arm/mach-shmobile/pm_runtime.c
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/sh_clk.h>
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 2d9b1b1..d317c22 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -30,6 +30,7 @@
 #include <linux/sh_dma.h>
 #include <linux/sh_intc.h>
 #include <linux/sh_timer.h>
+#include <linux/pm_domain.h>
 #include <mach/hardware.h>
 #include <mach/sh7372.h>
 #include <asm/mach-types.h>
@@ -994,6 +995,8 @@
 	sh7372_init_pm_domain(&sh7372_a3ri);
 	sh7372_init_pm_domain(&sh7372_a3sg);
 
+	sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);
+
 	platform_add_devices(sh7372_early_devices,
 			    ARRAY_SIZE(sh7372_early_devices));
 
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
index d37d3ca..f3ab3c5 100644
--- a/arch/arm/mach-shmobile/sleep-sh7372.S
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -30,58 +30,20 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/memory.h>
 #include <asm/assembler.h>
 
-#define SMFRAM 0xe6a70000
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
+	.align	12
+	.text
+	.global sh7372_resume_core_standby_a3sm
+sh7372_resume_core_standby_a3sm:
+	ldr     pc, 1f
+1:	.long   cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET
 
-	.align
-kernel_flush:
-	.word	v7_flush_dcache_all
-
-	.align	3
-ENTRY(sh7372_cpu_suspend)
-	stmfd	sp!, {r0-r12, lr}	@ save registers on stack
-
-	ldr	r8, =SMFRAM
-
-	mov	r4, sp			@ Store sp
-	mrs	r5, spsr		@ Store spsr
-	mov	r6, lr			@ Store lr
-	stmia	r8!, {r4-r6}
-
-	mrc	p15, 0, r4, c1, c0, 2	@ Coprocessor access control register
-	mrc	p15, 0, r5, c2, c0, 0	@ TTBR0
-	mrc	p15, 0, r6, c2, c0, 1	@ TTBR1
-	mrc	p15, 0, r7, c2, c0, 2	@ TTBCR
-	stmia	r8!, {r4-r7}
-
-	mrc	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
-	mrc	p15, 0, r5, c10, c2, 0	@ PRRR
-	mrc	p15, 0, r6, c10, c2, 1	@ NMRR
-	stmia	r8!,{r4-r6}
-
-	mrc	p15, 0, r4, c13, c0, 1	@ Context ID
-	mrc	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
-	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
-	mrs	r7, cpsr		@ Store current cpsr
-	stmia	r8!, {r4-r7}
-
-	mrc	p15, 0, r4, c1, c0, 0	@ save control register
-	stmia	r8!, {r4}
-
-	/*
-	 * jump out to kernel flush routine
-	 *  - reuse that code is better
-	 *  - it executes in a cached space so is faster than refetch per-block
-	 *  - should be faster and will change with kernel
-	 *  - 'might' have to copy address, load and jump to it
-	 * Flush all data from the L1 data cache before disabling
-	 * SCTLR.C bit.
-	 */
-	ldr	r1, kernel_flush
-	mov	lr, pc
-	bx	r1
-
+	.global	sh7372_do_idle_a3sm
+sh7372_do_idle_a3sm:
 	/*
 	 * Clear the SCTLR.C bit to prevent further data cache
 	 * allocation. Clearing SCTLR.C would make all the data accesses
@@ -92,10 +54,13 @@
 	mcr	p15, 0, r0, c1, c0, 0
 	isb
 
+	/* disable L2 cache in the aux control register */
+	mrc     p15, 0, r10, c1, c0, 1
+	bic     r10, r10, #2
+	mcr     p15, 0, r10, c1, c0, 1
+
 	/*
-	 * Invalidate L1 data cache. Even though only invalidate is
-	 * necessary exported flush API is used here. Doing clean
-	 * on already clean cache would be almost NOP.
+	 * Invalidate data cache again.
 	 */
 	ldr	r1, kernel_flush
 	blx	r1
@@ -115,146 +80,16 @@
 	dsb
 	dmb
 
-/*
- * ===================================
- * == WFI instruction => Enter idle ==
- * ===================================
- */
-	wfi				@ wait for interrupt
+#define SPDCR 0xe6180008
+#define A3SM (1 << 12)
 
-/*
- * ===================================
- * == Resume path for non-OFF modes ==
- * ===================================
- */
-	mrc	p15, 0, r0, c1, c0, 0
-	tst	r0, #(1 << 2)		@ Check C bit enabled?
-	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
-	mcreq	p15, 0, r0, c1, c0, 0
-	isb
+	/* A3SM power down */
+	ldr     r0, =SPDCR
+	ldr     r1, =A3SM
+	str     r1, [r0]
+1:
+	b      1b
 
-/*
- * ===================================
- * == Exit point from non-OFF modes ==
- * ===================================
- */
-	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
-
-	.pool
-
-	.align	12
-	.text
-	.global	sh7372_cpu_resume
-sh7372_cpu_resume:
-
-	mov	r1, #0
-	/*
-	 * Invalidate all instruction caches to PoU
-	 * and flush branch target cache
-	 */
-	mcr	p15, 0, r1, c7, c5, 0
-
-	ldr	r3, =SMFRAM
-
-	ldmia	r3!, {r4-r6}
-	mov	sp, r4			@ Restore sp
-	msr	spsr_cxsf, r5		@ Restore spsr
-	mov	lr, r6			@ Restore lr
-
-	ldmia	r3!, {r4-r7}
-	mcr	p15, 0, r4, c1, c0, 2	@ Coprocessor access Control Register
-	mcr	p15, 0, r5, c2, c0, 0	@ TTBR0
-	mcr	p15, 0, r6, c2, c0, 1	@ TTBR1
-	mcr	p15, 0, r7, c2, c0, 2	@ TTBCR
-
-	ldmia	r3!,{r4-r6}
-	mcr	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
-	mcr	p15, 0, r5, c10, c2, 0	@ PRRR
-	mcr	p15, 0, r6, c10, c2, 1	@ NMRR
-
-	ldmia	r3!,{r4-r7}
-	mcr	p15, 0, r4, c13, c0, 1	@ Context ID
-	mcr	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
-	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
-	msr	cpsr, r7		@ store cpsr
-
-	/* Starting to enable MMU here */
-	mrc	p15, 0, r7, c2, c0, 2 	@ Read TTBRControl
-	/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
-	and	r7, #0x7
-	cmp	r7, #0x0
-	beq	usettbr0
-ttbr_error:
-	/*
-	 * More work needs to be done to support N[0:2] value other than 0
-	 * So looping here so that the error can be detected
-	 */
-	b	ttbr_error
-
-	.align
-cache_pred_disable_mask:
-	.word	0xFFFFE7FB
-ttbrbit_mask:
-	.word	0xFFFFC000
-table_index_mask:
-	.word	0xFFF00000
-table_entry:
-	.word	0x00000C02
-usettbr0:
-
-	mrc	p15, 0, r2, c2, c0, 0
-	ldr	r5, ttbrbit_mask
-	and	r2, r5
-	mov	r4, pc
-	ldr	r5, table_index_mask
-	and	r4, r5			@ r4 = 31 to 20 bits of pc
-	/* Extract the value to be written to table entry */
-	ldr	r6, table_entry
-	/* r6 has the value to be written to table entry */
-	add	r6, r6, r4
-	/* Getting the address of table entry to modify */
-	lsr	r4, #18
-	/* r2 has the location which needs to be modified */
-	add	r2, r4
-	ldr	r4, [r2]
-	str	r6, [r2] /* modify the table entry */
-
-	mov	r7, r6
-	mov	r5, r2
-	mov	r6, r4
-	/* r5 = original page table address */
-	/* r6 = original page table data */
-
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c5, 4	@ Flush prefetch buffer
-	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate branch predictor array
-	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate instruction TLB
-	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate data TLB
-
-	/*
-	 * Restore control register. This enables the MMU.
-	 * The caches and prediction are not enabled here, they
-	 * will be enabled after restoring the MMU table entry.
-	 */
-	ldmia	r3!, {r4}
-	stmia	r3!, {r5} /* save original page table address */
-	stmia	r3!, {r6} /* save original page table data */
-	stmia	r3!, {r7} /* save modified page table data */
-
-	ldr	r2, cache_pred_disable_mask
-	and	r4, r2
-	mcr	p15, 0, r4, c1, c0, 0
-	dsb
-	isb
-
-	ldr     r0, =restoremmu_on
-	bx      r0
-
-/*
- * ==============================
- * == Exit point from OFF mode ==
- * ==============================
- */
-restoremmu_on:
-
-	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
+kernel_flush:
+	.word v7_flush_dcache_all
+#endif
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2639ae7..6488ce1 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o
+obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o
 obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index b97294e..b876e60 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -10,18 +10,13 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/pm.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 
 #ifdef CONFIG_PM
 
-struct pm_clk_data {
-	struct list_head clock_list;
-	spinlock_t lock;
-};
-
 enum pce_status {
 	PCE_STATUS_NONE = 0,
 	PCE_STATUS_ACQUIRED,
@@ -36,11 +31,6 @@
 	enum pce_status status;
 };
 
-static struct pm_clk_data *__to_pcd(struct device *dev)
-{
-	return dev ? dev->power.subsys_data : NULL;
-}
-
 /**
  * pm_clk_acquire - Acquire a device clock.
  * @dev: Device whose clock is to be acquired.
@@ -67,10 +57,10 @@
  */
 int pm_clk_add(struct device *dev, const char *con_id)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 
-	if (!pcd)
+	if (!psd)
 		return -EINVAL;
 
 	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -91,9 +81,9 @@
 
 	pm_clk_acquire(dev, ce);
 
-	spin_lock_irq(&pcd->lock);
-	list_add_tail(&ce->node, &pcd->clock_list);
-	spin_unlock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
+	list_add_tail(&ce->node, &psd->clock_list);
+	spin_unlock_irq(&psd->lock);
 	return 0;
 }
 
@@ -130,15 +120,15 @@
  */
 void pm_clk_remove(struct device *dev, const char *con_id)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 
-	if (!pcd)
+	if (!psd)
 		return;
 
-	spin_lock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
 
-	list_for_each_entry(ce, &pcd->clock_list, node) {
+	list_for_each_entry(ce, &psd->clock_list, node) {
 		if (!con_id && !ce->con_id)
 			goto remove;
 		else if (!con_id || !ce->con_id)
@@ -147,12 +137,12 @@
 			goto remove;
 	}
 
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
 	return;
 
  remove:
 	list_del(&ce->node);
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
 
 	__pm_clk_remove(ce);
 }
@@ -161,23 +151,27 @@
  * pm_clk_init - Initialize a device's list of power management clocks.
  * @dev: Device to initialize the list of PM clocks for.
  *
- * Allocate a struct pm_clk_data object, initialize its lock member and
- * make the @dev's power.subsys_data field point to it.
+ * Initialize the lock and clock_list members of the device's pm_subsys_data
+ * object.
  */
-int pm_clk_init(struct device *dev)
+void pm_clk_init(struct device *dev)
 {
-	struct pm_clk_data *pcd;
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	if (psd)
+		INIT_LIST_HEAD(&psd->clock_list);
+}
 
-	pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
-	if (!pcd) {
-		dev_err(dev, "Not enough memory for PM clock data.\n");
-		return -ENOMEM;
-	}
-
-	INIT_LIST_HEAD(&pcd->clock_list);
-	spin_lock_init(&pcd->lock);
-	dev->power.subsys_data = pcd;
-	return 0;
+/**
+ * pm_clk_create - Create and initialize a device's list of PM clocks.
+ * @dev: Device to create and initialize the list of PM clocks for.
+ *
+ * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
+ * members and make the @dev's power.subsys_data field point to it.
+ */
+int pm_clk_create(struct device *dev)
+{
+	int ret = dev_pm_get_subsys_data(dev);
+	return ret < 0 ? ret : 0;
 }
 
 /**
@@ -185,29 +179,28 @@
  * @dev: Device to destroy the list of PM clocks for.
  *
  * Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_clk_data object pointed to by it before and free
+ * from the struct pm_subsys_data object pointed to by it before and free
  * that object.
  */
 void pm_clk_destroy(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce, *c;
 	struct list_head list;
 
-	if (!pcd)
+	if (!psd)
 		return;
 
-	dev->power.subsys_data = NULL;
 	INIT_LIST_HEAD(&list);
 
-	spin_lock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
 
-	list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
+	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
 		list_move(&ce->node, &list);
 
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
 
-	kfree(pcd);
+	dev_pm_put_subsys_data(dev);
 
 	list_for_each_entry_safe_reverse(ce, c, &list, node) {
 		list_del(&ce->node);
@@ -225,25 +218,25 @@
  */
 int pm_clk_suspend(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (!pcd)
+	if (!psd)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
+	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_disable(ce->clk);
 			ce->status = PCE_STATUS_ACQUIRED;
 		}
 	}
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -254,25 +247,25 @@
  */
 int pm_clk_resume(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (!pcd)
+	if (!psd)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry(ce, &pcd->clock_list, node) {
+	list_for_each_entry(ce, &psd->clock_list, node) {
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_enable(ce->clk);
 			ce->status = PCE_STATUS_ENABLED;
 		}
 	}
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -310,7 +303,7 @@
 		if (dev->pm_domain)
 			break;
 
-		error = pm_clk_init(dev);
+		error = pm_clk_create(dev);
 		if (error)
 			break;
 
@@ -345,22 +338,22 @@
  */
 int pm_clk_suspend(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	/* If there is no driver, the clocks are already disabled. */
-	if (!pcd || !dev->driver)
+	if (!psd || !dev->driver)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+	list_for_each_entry_reverse(ce, &psd->clock_list, node)
 		clk_disable(ce->clk);
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -371,22 +364,22 @@
  */
 int pm_clk_resume(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	/* If there is no driver, the clocks should remain disabled. */
-	if (!pcd || !dev->driver)
+	if (!psd || !dev->driver)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry(ce, &pcd->clock_list, node)
+	list_for_each_entry(ce, &psd->clock_list, node)
 		clk_enable(ce->clk);
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
new file mode 100644
index 0000000..29820c3
--- /dev/null
+++ b/drivers/base/power/common.c
@@ -0,0 +1,86 @@
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter.  Return 1 if a new object has been created, otherwise
+ * return 0 or error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+	if (!psd)
+		return -ENOMEM;
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.subsys_data) {
+		dev->power.subsys_data->refcount++;
+	} else {
+		spin_lock_init(&psd->lock);
+		psd->refcount = 1;
+		dev->power.subsys_data = psd;
+		pm_clk_init(dev);
+		psd = NULL;
+		ret = 1;
+	}
+
+	spin_unlock_irq(&dev->power.lock);
+
+	/* kfree() verifies that its argument is nonzero. */
+	kfree(psd);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed.  Return 1 if that happens or 0
+ * otherwise.
+ */
+int dev_pm_put_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	spin_lock_irq(&dev->power.lock);
+
+	psd = dev_to_psd(dev);
+	if (!psd) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (--psd->refcount == 0) {
+		dev->power.subsys_data = NULL;
+		kfree(psd);
+		ret = 1;
+	}
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
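
To illustrate the refcounting contract of the two helpers above: a hedged
sketch of how a hypothetical subsystem would pair them (my_subsys_attach()
and my_subsys_detach() are made-up names; the return-value semantics follow
the kerneldoc in this file):

    static int my_subsys_attach(struct device *dev)
    {
    	int ret = dev_pm_get_subsys_data(dev);	/* 1: new object, 0: extra reference */

    	if (ret < 0)
    		return ret;			/* -ENOMEM: allocation failed */

    	/* dev->power.subsys_data (lock, clock_list, refcount) is now valid */
    	return 0;
    }

    static void my_subsys_detach(struct device *dev)
    {
    	/* drops a reference; frees power.subsys_data when the last one goes */
    	dev_pm_put_subsys_data(dev);
    }
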
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 1c37457..22fe029 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -29,10 +29,20 @@
 	return pd_to_genpd(dev->pm_domain);
 }
 
-static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
-	if (!WARN_ON(genpd->sd_count == 0))
-			genpd->sd_count--;
+	bool ret = false;
+
+	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
+		ret = !!atomic_dec_and_test(&genpd->sd_count);
+
+	return ret;
+}
+
+static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
+{
+	atomic_inc(&genpd->sd_count);
+	smp_mb__after_atomic_inc();
 }
 
 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
@@ -71,60 +81,97 @@
 }
 
 /**
- * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+ * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  *
- * Restore power to @genpd and all of its parents so that it is possible to
+ * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-int pm_genpd_poweron(struct generic_pm_domain *genpd)
+int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
+	DEFINE_WAIT(wait);
 	int ret = 0;
 
- start:
-	if (parent) {
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-	} else {
+	/* If the domain's master is being waited for, we have to wait too. */
+	for (;;) {
+		prepare_to_wait(&genpd->status_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (genpd->status != GPD_STATE_WAIT_MASTER)
+			break;
+		mutex_unlock(&genpd->lock);
+
+		schedule();
+
 		mutex_lock(&genpd->lock);
 	}
+	finish_wait(&genpd->status_wait_queue, &wait);
 
 	if (genpd->status == GPD_STATE_ACTIVE
 	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
-		goto out;
+		return 0;
 
 	if (genpd->status != GPD_STATE_POWER_OFF) {
 		genpd_set_active(genpd);
-		goto out;
+		return 0;
 	}
 
-	if (parent && parent->status != GPD_STATE_ACTIVE) {
+	/*
+	 * The list is guaranteed not to change while the loop below is being
+	 * executed, unless one of the masters' .power_on() callbacks fiddles
+	 * with it.
+	 */
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_inc(link->master);
+		genpd->status = GPD_STATE_WAIT_MASTER;
+
 		mutex_unlock(&genpd->lock);
-		genpd_release_lock(parent);
 
-		ret = pm_genpd_poweron(parent);
-		if (ret)
-			return ret;
+		ret = pm_genpd_poweron(link->master);
 
-		goto start;
+		mutex_lock(&genpd->lock);
+
+		/*
+		 * The "wait for parent" status is guaranteed not to change
+		 * while the master is powering on.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+		wake_up_all(&genpd->status_wait_queue);
+		if (ret) {
+			genpd_sd_counter_dec(link->master);
+			goto err;
+		}
 	}
 
 	if (genpd->power_on) {
 		ret = genpd->power_on(genpd);
 		if (ret)
-			goto out;
+			goto err;
 	}
 
 	genpd_set_active(genpd);
-	if (parent)
-		parent->sd_count++;
 
- out:
+	return 0;
+
+ err:
+	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+		genpd_sd_counter_dec(link->master);
+
+	return ret;
+}
+
+/**
+ * pm_genpd_poweron - Restore power to a given PM domain and its masters.
+ * @genpd: PM domain to power up.
+ */
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+	int ret;
+
+	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
 	mutex_unlock(&genpd->lock);
-	if (parent)
-		genpd_release_lock(parent);
-
 	return ret;
 }
 
@@ -134,18 +181,19 @@
 
 /**
  * __pm_genpd_save_device - Save the pre-suspend state of a device.
- * @dle: Device list entry of the device to save the state of.
+ * @pdd: Domain data of the device to save the state of.
  * @genpd: PM domain the device belongs to.
  */
-static int __pm_genpd_save_device(struct dev_list_entry *dle,
+static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 				  struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
 
-	if (dle->need_restore)
+	if (gpd_data->need_restore)
 		return 0;
 
 	mutex_unlock(&genpd->lock);
@@ -163,24 +211,25 @@
 	mutex_lock(&genpd->lock);
 
 	if (!ret)
-		dle->need_restore = true;
+		gpd_data->need_restore = true;
 
 	return ret;
 }
 
 /**
  * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
- * @dle: Device list entry of the device to restore the state of.
+ * @pdd: Domain data of the device to restore the state of.
  * @genpd: PM domain the device belongs to.
  */
-static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 				      struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
 	struct device_driver *drv = dev->driver;
 
-	if (!dle->need_restore)
+	if (!gpd_data->need_restore)
 		return;
 
 	mutex_unlock(&genpd->lock);
@@ -197,7 +246,7 @@
 
 	mutex_lock(&genpd->lock);
 
-	dle->need_restore = false;
+	gpd_data->need_restore = false;
 }
 
 /**
@@ -211,7 +260,8 @@
  */
 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
 {
-	return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+	return genpd->status == GPD_STATE_WAIT_MASTER
+		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
 }
 
 /**
@@ -238,8 +288,8 @@
 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent;
-	struct dev_list_entry *dle;
+	struct pm_domain_data *pdd;
+	struct gpd_link *link;
 	unsigned int not_suspended;
 	int ret = 0;
 
@@ -247,19 +297,22 @@
 	/*
 	 * Do not try to power off the domain in the following situations:
 	 * (1) The domain is already in the "power off" state.
-	 * (2) System suspend is in progress.
+	 * (2) The domain is waiting for its master to power up.
 	 * (3) One of the domain's devices is being resumed right now.
+	 * (4) System suspend is in progress.
 	 */
-	if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
-	    || genpd->resume_count > 0)
+	if (genpd->status == GPD_STATE_POWER_OFF
+	    || genpd->status == GPD_STATE_WAIT_MASTER
+	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
 		return 0;
 
-	if (genpd->sd_count > 0)
+	if (atomic_read(&genpd->sd_count) > 0)
 		return -EBUSY;
 
 	not_suspended = 0;
-	list_for_each_entry(dle, &genpd->dev_list, node)
-		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
+	list_for_each_entry(pdd, &genpd->dev_list, list_node)
+		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
+		    || pdd->dev->power.irq_safe))
 			not_suspended++;
 
 	if (not_suspended > genpd->in_progress)
@@ -282,54 +335,50 @@
 	genpd->status = GPD_STATE_BUSY;
 	genpd->poweroff_task = current;
 
-	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
-		ret = __pm_genpd_save_device(dle, genpd);
+	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+		ret = atomic_read(&genpd->sd_count) == 0 ?
+			__pm_genpd_save_device(pdd, genpd) : -EBUSY;
+
+		if (genpd_abort_poweroff(genpd))
+			goto out;
+
 		if (ret) {
 			genpd_set_active(genpd);
 			goto out;
 		}
 
-		if (genpd_abort_poweroff(genpd))
-			goto out;
-
 		if (genpd->status == GPD_STATE_REPEAT) {
 			genpd->poweroff_task = NULL;
 			goto start;
 		}
 	}
 
-	parent = genpd->parent;
-	if (parent) {
-		mutex_unlock(&genpd->lock);
-
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-
-		if (genpd_abort_poweroff(genpd)) {
-			genpd_release_lock(parent);
+	if (genpd->power_off) {
+		if (atomic_read(&genpd->sd_count) > 0) {
+			ret = -EBUSY;
 			goto out;
 		}
-	}
 
-	if (genpd->power_off) {
+		/*
+		 * If sd_count > 0 at this point, one of the subdomains hasn't
+		 * managed to call pm_genpd_poweron() for the master yet after
+		 * incrementing it.  In that case pm_genpd_poweron() will wait
+		 * for us to drop the lock, so we can call .power_off() and let
+		 * the pm_genpd_poweron() restore power for us (this shouldn't
+		 * happen very often).
+		 */
 		ret = genpd->power_off(genpd);
 		if (ret == -EBUSY) {
 			genpd_set_active(genpd);
-			if (parent)
-				genpd_release_lock(parent);
-
 			goto out;
 		}
 	}
 
 	genpd->status = GPD_STATE_POWER_OFF;
 
-	if (parent) {
-		genpd_sd_counter_dec(parent);
-		if (parent->sd_count == 0)
-			genpd_queue_power_off_work(parent);
-
-		genpd_release_lock(parent);
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		genpd_queue_power_off_work(link->master);
 	}
 
  out:
@@ -371,12 +420,21 @@
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
+	might_sleep_if(!genpd->dev_irq_safe);
+
 	if (genpd->stop_device) {
 		int ret = genpd->stop_device(dev);
 		if (ret)
 			return ret;
 	}
 
+	/*
+	 * If power.irq_safe is set, this routine will be run with interrupts
+	 * off, so it can't use mutexes.
+	 */
+	if (dev->power.irq_safe)
+		return 0;
+
 	mutex_lock(&genpd->lock);
 	genpd->in_progress++;
 	pm_genpd_poweroff(genpd);
@@ -387,24 +445,6 @@
 }
 
 /**
- * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
- * @dev: Device to resume.
- * @genpd: PM domain the device belongs to.
- */
-static void __pm_genpd_runtime_resume(struct device *dev,
-				      struct generic_pm_domain *genpd)
-{
-	struct dev_list_entry *dle;
-
-	list_for_each_entry(dle, &genpd->dev_list, node) {
-		if (dle->dev == dev) {
-			__pm_genpd_restore_device(dle, genpd);
-			break;
-		}
-	}
-}
-
-/**
  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
  * @dev: Device to resume.
  *
@@ -424,11 +464,18 @@
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	ret = pm_genpd_poweron(genpd);
-	if (ret)
-		return ret;
+	might_sleep_if(!genpd->dev_irq_safe);
+
+	/* If power.irq_safe, the PM domain is never powered off. */
+	if (dev->power.irq_safe)
+		goto out;
 
 	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
+	if (ret) {
+		mutex_unlock(&genpd->lock);
+		return ret;
+	}
 	genpd->status = GPD_STATE_BUSY;
 	genpd->resume_count++;
 	for (;;) {
@@ -448,12 +495,13 @@
 		mutex_lock(&genpd->lock);
 	}
 	finish_wait(&genpd->status_wait_queue, &wait);
-	__pm_genpd_runtime_resume(dev, genpd);
+	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
 	genpd->resume_count--;
 	genpd_set_active(genpd);
 	wake_up_all(&genpd->status_wait_queue);
 	mutex_unlock(&genpd->lock);
 
+ out:
 	if (genpd->start_device)
 		genpd->start_device(dev);
 
@@ -478,8 +526,6 @@
 #else
 
 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
-static inline void __pm_genpd_runtime_resume(struct device *dev,
-					     struct generic_pm_domain *genpd) {}
 
 #define pm_genpd_runtime_suspend	NULL
 #define pm_genpd_runtime_resume		NULL
@@ -489,11 +535,11 @@
 #ifdef CONFIG_PM_SLEEP
 
 /**
- * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
  *
  * Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so.  Also, in that case propagate to its parent.
+ * hibernation) and do that if so.  Also, in that case propagate to its masters.
  *
  * This function is only called in "noirq" stages of system power transitions,
  * so it need not acquire locks (all of the "noirq" callbacks are executed
@@ -501,21 +547,23 @@
  */
 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
 
 	if (genpd->status == GPD_STATE_POWER_OFF)
 		return;
 
-	if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+	if (genpd->suspended_count != genpd->device_count
+	    || atomic_read(&genpd->sd_count) > 0)
 		return;
 
 	if (genpd->power_off)
 		genpd->power_off(genpd);
 
 	genpd->status = GPD_STATE_POWER_OFF;
-	if (parent) {
-		genpd_sd_counter_dec(parent);
-		pm_genpd_sync_poweroff(parent);
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		pm_genpd_sync_poweroff(link->master);
 	}
 }
 
@@ -1034,7 +1082,8 @@
  */
 int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 {
-	struct dev_list_entry *dle;
+	struct generic_pm_domain_data *gpd_data;
+	struct pm_domain_data *pdd;
 	int ret = 0;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1054,26 +1103,26 @@
 		goto out;
 	}
 
-	list_for_each_entry(dle, &genpd->dev_list, node)
-		if (dle->dev == dev) {
+	list_for_each_entry(pdd, &genpd->dev_list, list_node)
+		if (pdd->dev == dev) {
 			ret = -EINVAL;
 			goto out;
 		}
 
-	dle = kzalloc(sizeof(*dle), GFP_KERNEL);
-	if (!dle) {
+	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+	if (!gpd_data) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	dle->dev = dev;
-	dle->need_restore = false;
-	list_add_tail(&dle->node, &genpd->dev_list);
 	genpd->device_count++;
 
-	spin_lock_irq(&dev->power.lock);
 	dev->pm_domain = &genpd->domain;
-	spin_unlock_irq(&dev->power.lock);
+	dev_pm_get_subsys_data(dev);
+	dev->power.subsys_data->domain_data = &gpd_data->base;
+	gpd_data->base.dev = dev;
+	gpd_data->need_restore = false;
+	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
 
  out:
 	genpd_release_lock(genpd);
@@ -1089,7 +1138,7 @@
 int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 			   struct device *dev)
 {
-	struct dev_list_entry *dle;
+	struct pm_domain_data *pdd;
 	int ret = -EINVAL;
 
 	dev_dbg(dev, "%s()\n", __func__);
@@ -1104,17 +1153,17 @@
 		goto out;
 	}
 
-	list_for_each_entry(dle, &genpd->dev_list, node) {
-		if (dle->dev != dev)
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		if (pdd->dev != dev)
 			continue;
 
-		spin_lock_irq(&dev->power.lock);
+		list_del_init(&pdd->list_node);
+		pdd->dev = NULL;
+		dev_pm_put_subsys_data(dev);
 		dev->pm_domain = NULL;
-		spin_unlock_irq(&dev->power.lock);
+		kfree(to_gpd_data(pdd));
 
 		genpd->device_count--;
-		list_del(&dle->node);
-		kfree(dle);
 
 		ret = 0;
 		break;
@@ -1129,48 +1178,55 @@
 /**
  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
  * @genpd: Master PM domain to add the subdomain to.
- * @new_subdomain: Subdomain to be added.
+ * @subdomain: Subdomain to be added.
  */
 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
-			   struct generic_pm_domain *new_subdomain)
+			   struct generic_pm_domain *subdomain)
 {
-	struct generic_pm_domain *subdomain;
+	struct gpd_link *link;
 	int ret = 0;
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
  start:
 	genpd_acquire_lock(genpd);
-	mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
 
-	if (new_subdomain->status != GPD_STATE_POWER_OFF
-	    && new_subdomain->status != GPD_STATE_ACTIVE) {
-		mutex_unlock(&new_subdomain->lock);
+	if (subdomain->status != GPD_STATE_POWER_OFF
+	    && subdomain->status != GPD_STATE_ACTIVE) {
+		mutex_unlock(&subdomain->lock);
 		genpd_release_lock(genpd);
 		goto start;
 	}
 
 	if (genpd->status == GPD_STATE_POWER_OFF
-	    &&  new_subdomain->status != GPD_STATE_POWER_OFF) {
+	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
-		if (subdomain == new_subdomain) {
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		if (link->slave == subdomain && link->master == genpd) {
 			ret = -EINVAL;
 			goto out;
 		}
 	}
 
-	list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
-	new_subdomain->parent = genpd;
+	link = kzalloc(sizeof(*link), GFP_KERNEL);
+	if (!link) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	link->master = genpd;
+	list_add_tail(&link->master_node, &genpd->master_links);
+	link->slave = subdomain;
+	list_add_tail(&link->slave_node, &subdomain->slave_links);
 	if (subdomain->status != GPD_STATE_POWER_OFF)
-		genpd->sd_count++;
+		genpd_sd_counter_inc(genpd);
 
  out:
-	mutex_unlock(&new_subdomain->lock);
+	mutex_unlock(&subdomain->lock);
 	genpd_release_lock(genpd);
 
 	return ret;
@@ -1179,22 +1235,22 @@
 /**
  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
  * @genpd: Master PM domain to remove the subdomain from.
- * @target: Subdomain to be removed.
+ * @subdomain: Subdomain to be removed.
  */
 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
-			      struct generic_pm_domain *target)
+			      struct generic_pm_domain *subdomain)
 {
-	struct generic_pm_domain *subdomain;
+	struct gpd_link *link;
 	int ret = -EINVAL;
 
-	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
  start:
 	genpd_acquire_lock(genpd);
 
-	list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
-		if (subdomain != target)
+	list_for_each_entry(link, &genpd->master_links, master_node) {
+		if (link->slave != subdomain)
 			continue;
 
 		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
@@ -1206,8 +1262,9 @@
 			goto start;
 		}
 
-		list_del(&subdomain->sd_node);
-		subdomain->parent = NULL;
+		list_del(&link->master_node);
+		list_del(&link->slave_node);
+		kfree(link);
 		if (subdomain->status != GPD_STATE_POWER_OFF)
 			genpd_sd_counter_dec(genpd);
 
@@ -1234,15 +1291,14 @@
 	if (IS_ERR_OR_NULL(genpd))
 		return;
 
-	INIT_LIST_HEAD(&genpd->sd_node);
-	genpd->parent = NULL;
+	INIT_LIST_HEAD(&genpd->master_links);
+	INIT_LIST_HEAD(&genpd->slave_links);
 	INIT_LIST_HEAD(&genpd->dev_list);
-	INIT_LIST_HEAD(&genpd->sd_list);
 	mutex_init(&genpd->lock);
 	genpd->gov = gov;
 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
 	genpd->in_progress = 0;
-	genpd->sd_count = 0;
+	atomic_set(&genpd->sd_count, 0);
 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
 	init_waitqueue_head(&genpd->status_wait_queue);
 	genpd->poweroff_task = NULL;
diff --git a/include/linux/device.h b/include/linux/device.h
index c20dfbf..5d200ed 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -636,6 +636,11 @@
 }
 #endif
 
+static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
+{
+	return dev ? dev->power.subsys_data : NULL;
+}
+
 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
 {
 	return dev->kobj.uevent_suppress;
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 18de9f8..f497ed0 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -423,6 +423,22 @@
 
 struct wakeup_source;
 
+struct pm_domain_data {
+	struct list_head list_node;
+	struct device *dev;
+};
+
+struct pm_subsys_data {
+	spinlock_t lock;
+	unsigned int refcount;
+#ifdef CONFIG_PM_CLK
+	struct list_head clock_list;
+#endif
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+	struct pm_domain_data *domain_data;
+#endif
+};
+
 struct dev_pm_info {
 	pm_message_t		power_state;
 	unsigned int		can_wakeup:1;
@@ -464,10 +480,12 @@
 	unsigned long		suspended_jiffies;
 	unsigned long		accounting_timestamp;
 #endif
-	void			*subsys_data;  /* Owned by the subsystem. */
+	struct pm_subsys_data	*subsys_data;  /* Owned by the subsystem. */
 };
 
 extern void update_pm_runtime_accounting(struct device *dev);
+extern int dev_pm_get_subsys_data(struct device *dev);
+extern int dev_pm_put_subsys_data(struct device *dev);
 
 /*
  * Power domains provide callbacks that are executed during system suspend,
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
new file mode 100644
index 0000000..8348866
--- /dev/null
+++ b/include/linux/pm_clock.h
@@ -0,0 +1,71 @@
+/*
+ * pm_clock.h - Definitions and headers related to device clocks.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef _LINUX_PM_CLOCK_H
+#define _LINUX_PM_CLOCK_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+struct pm_clk_notifier_block {
+	struct notifier_block nb;
+	struct dev_pm_domain *pm_domain;
+	char *con_ids[];
+};
+
+#ifdef CONFIG_PM_CLK
+static inline bool pm_clk_no_clocks(struct device *dev)
+{
+	return dev && dev->power.subsys_data
+		&& list_empty(&dev->power.subsys_data->clock_list);
+}
+
+extern void pm_clk_init(struct device *dev);
+extern int pm_clk_create(struct device *dev);
+extern void pm_clk_destroy(struct device *dev);
+extern int pm_clk_add(struct device *dev, const char *con_id);
+extern void pm_clk_remove(struct device *dev, const char *con_id);
+extern int pm_clk_suspend(struct device *dev);
+extern int pm_clk_resume(struct device *dev);
+#else
+static inline bool pm_clk_no_clocks(struct device *dev)
+{
+	return true;
+}
+static inline void pm_clk_init(struct device *dev)
+{
+}
+static inline int pm_clk_create(struct device *dev)
+{
+	return -EINVAL;
+}
+static inline void pm_clk_destroy(struct device *dev)
+{
+}
+static inline int pm_clk_add(struct device *dev, const char *con_id)
+{
+	return -EINVAL;
+}
+static inline void pm_clk_remove(struct device *dev, const char *con_id)
+{
+}
+#define pm_clk_suspend	NULL
+#define pm_clk_resume	NULL
+#endif
+
+#ifdef CONFIG_HAVE_CLK
+extern void pm_clk_add_notifier(struct bus_type *bus,
+					struct pm_clk_notifier_block *clknb);
+#else
+static inline void pm_clk_add_notifier(struct bus_type *bus,
+					struct pm_clk_notifier_block *clknb)
+{
+}
+#endif
+
+#endif
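
As a usage note for the new header: buses that handle runtime PM purely by
clock gating hook these helpers up through the notifier, as the shmobile and
OMAP1 runtime PM code touched by this series does. A hedged sketch of that
pattern (my_clk_pm_domain, my_bus_clk_notifier and my_pm_runtime_init are
illustrative names; system sleep ops are omitted for brevity):

    #include <linux/init.h>
    #include <linux/platform_device.h>
    #include <linux/pm.h>
    #include <linux/pm_clock.h>

    /* runtime PM of platform devices done by enabling/disabling their clocks */
    static struct dev_pm_domain my_clk_pm_domain = {
    	.ops = {
    		.runtime_suspend = pm_clk_suspend,
    		.runtime_resume = pm_clk_resume,
    	},
    };

    static struct pm_clk_notifier_block my_bus_clk_notifier = {
    	.pm_domain = &my_clk_pm_domain,
    	.con_ids = { NULL, },	/* NULL con_id: the device's default clock */
    };

    static int __init my_pm_runtime_init(void)
    {
    	pm_clk_add_notifier(&platform_bus_type, &my_bus_clk_notifier);
    	return 0;
    }
    core_initcall(my_pm_runtime_init);
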
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index f9ec173..65633e5 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -13,6 +13,7 @@
 
 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
+	GPD_STATE_WAIT_MASTER,	/* PM domain's master is being waited for */
 	GPD_STATE_BUSY,		/* Something is happening to the PM domain */
 	GPD_STATE_REPEAT,	/* Power off in progress, to be repeated */
 	GPD_STATE_POWER_OFF,	/* PM domain is off */
@@ -25,15 +26,14 @@
 struct generic_pm_domain {
 	struct dev_pm_domain domain;	/* PM domain operations */
 	struct list_head gpd_list_node;	/* Node in the global PM domains list */
-	struct list_head sd_node;	/* Node in the parent's subdomain list */
-	struct generic_pm_domain *parent;	/* Parent PM domain */
-	struct list_head sd_list;	/* List of dubdomains */
+	struct list_head master_links;	/* Links with PM domain as a master */
+	struct list_head slave_links;	/* Links with PM domain as a slave */
 	struct list_head dev_list;	/* List of devices */
 	struct mutex lock;
 	struct dev_power_governor *gov;
 	struct work_struct power_off_work;
 	unsigned int in_progress;	/* Number of devices being suspended now */
-	unsigned int sd_count;	/* Number of subdomains with power "on" */
+	atomic_t sd_count;	/* Number of subdomains with power "on" */
 	enum gpd_status status;	/* Current state of the domain */
 	wait_queue_head_t status_wait_queue;
 	struct task_struct *poweroff_task;	/* Powering off task */
@@ -42,6 +42,7 @@
 	unsigned int suspended_count;	/* System suspend device counter */
 	unsigned int prepared_count;	/* Suspend counter of prepared devices */
 	bool suspend_power_off;	/* Power status before system suspend */
+	bool dev_irq_safe;	/* Device callbacks are IRQ-safe */
 	int (*power_off)(struct generic_pm_domain *domain);
 	int (*power_on)(struct generic_pm_domain *domain);
 	int (*start_device)(struct device *dev);
@@ -54,12 +55,23 @@
 	return container_of(pd, struct generic_pm_domain, domain);
 }
 
-struct dev_list_entry {
-	struct list_head node;
-	struct device *dev;
+struct gpd_link {
+	struct generic_pm_domain *master;
+	struct list_head master_node;
+	struct generic_pm_domain *slave;
+	struct list_head slave_node;
+};
+
+struct generic_pm_domain_data {
+	struct pm_domain_data base;
 	bool need_restore;
 };
 
+static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd)
+{
+	return container_of(pdd, struct generic_pm_domain_data, base);
+}
+
 #ifdef CONFIG_PM_GENERIC_DOMAINS
 extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
 			       struct device *dev);
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index daac05d..70b2840 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -251,46 +251,4 @@
 	__pm_runtime_use_autosuspend(dev, false);
 }
 
-struct pm_clk_notifier_block {
-	struct notifier_block nb;
-	struct dev_pm_domain *pm_domain;
-	char *con_ids[];
-};
-
-#ifdef CONFIG_PM_CLK
-extern int pm_clk_init(struct device *dev);
-extern void pm_clk_destroy(struct device *dev);
-extern int pm_clk_add(struct device *dev, const char *con_id);
-extern void pm_clk_remove(struct device *dev, const char *con_id);
-extern int pm_clk_suspend(struct device *dev);
-extern int pm_clk_resume(struct device *dev);
-#else
-static inline int pm_clk_init(struct device *dev)
-{
-	return -EINVAL;
-}
-static inline void pm_clk_destroy(struct device *dev)
-{
-}
-static inline int pm_clk_add(struct device *dev, const char *con_id)
-{
-	return -EINVAL;
-}
-static inline void pm_clk_remove(struct device *dev, const char *con_id)
-{
-}
-#define pm_clk_suspend	NULL
-#define pm_clk_resume	NULL
-#endif
-
-#ifdef CONFIG_HAVE_CLK
-extern void pm_clk_add_notifier(struct bus_type *bus,
-					struct pm_clk_notifier_block *clknb);
-#else
-static inline void pm_clk_add_notifier(struct bus_type *bus,
-					struct pm_clk_notifier_block *clknb)
-{
-}
-#endif
-
 #endif