Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 1 | /* |
| 2 | * OMAP2/3 PRM module functions |
| 3 | * |
Paul Walmsley | 26c98c5 | 2011-12-16 14:36:58 -0700 | [diff] [blame] | 4 | * Copyright (C) 2010-2011 Texas Instruments, Inc. |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 5 | * Copyright (C) 2010 Nokia Corporation |
| 6 | * BenoƮt Cousson |
| 7 | * Paul Walmsley |
| 8 | * |
| 9 | * This program is free software; you can redistribute it and/or modify |
| 10 | * it under the terms of the GNU General Public License version 2 as |
| 11 | * published by the Free Software Foundation. |
| 12 | */ |
| 13 | |
| 14 | #include <linux/kernel.h> |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 15 | #include <linux/errno.h> |
| 16 | #include <linux/err.h> |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 17 | #include <linux/io.h> |
Kevin Hilman | 99b59df | 2012-04-27 16:05:51 -0700 | [diff] [blame] | 18 | #include <linux/irq.h> |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 19 | |
Tony Lindgren | 4e65331 | 2011-11-10 22:45:17 +0100 | [diff] [blame] | 20 | #include "common.h" |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 21 | #include <plat/cpu.h> |
| 22 | #include <plat/prcm.h> |
Paul Walmsley | d19e8f2 | 2012-01-25 12:57:49 -0700 | [diff] [blame] | 23 | #include <plat/irqs.h> |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 24 | |
Kevin Hilman | 58aaa59 | 2011-03-28 10:52:04 -0700 | [diff] [blame] | 25 | #include "vp.h" |
| 26 | |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 27 | #include "prm2xxx_3xxx.h" |
| 28 | #include "cm2xxx_3xxx.h" |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 29 | #include "prm-regbits-24xx.h" |
| 30 | #include "prm-regbits-34xx.h" |
| 31 | |
Tero Kristo | 22f5137 | 2011-12-16 14:36:59 -0700 | [diff] [blame] | 32 | static const struct omap_prcm_irq omap3_prcm_irqs[] = { |
| 33 | OMAP_PRCM_IRQ("wkup", 0, 0), |
| 34 | OMAP_PRCM_IRQ("io", 9, 1), |
| 35 | }; |
| 36 | |
/*
 * OMAP3 PRCM IRQ chain-handler configuration, passed to
 * omap_prcm_register_chain_handler() at init time.  The OMAP3 PRM has a
 * single IRQSTATUS/IRQENABLE register pair (nr_regs = 1), and the
 * register-access and save/restore callbacks below are the OMAP3-specific
 * implementations defined later in this file.
 */
static struct omap_prcm_irq_setup omap3_prcm_irq_setup = {
	.ack			= OMAP3_PRM_IRQSTATUS_MPU_OFFSET,
	.mask			= OMAP3_PRM_IRQENABLE_MPU_OFFSET,
	.nr_regs		= 1,
	.irqs			= omap3_prcm_irqs,
	.nr_irqs		= ARRAY_SIZE(omap3_prcm_irqs),
	.irq			= INT_34XX_PRCM_MPU_IRQ,
	.read_pending_irqs	= &omap3xxx_prm_read_pending_irqs,
	.ocp_barrier		= &omap3xxx_prm_ocp_barrier,
	.save_and_clear_irqen	= &omap3xxx_prm_save_and_clear_irqen,
	.restore_irqen		= &omap3xxx_prm_restore_irqen,
};
| 49 | |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 50 | u32 omap2_prm_read_mod_reg(s16 module, u16 idx) |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 51 | { |
| 52 | return __raw_readl(prm_base + module + idx); |
| 53 | } |
| 54 | |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 55 | void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx) |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 56 | { |
| 57 | __raw_writel(val, prm_base + module + idx); |
| 58 | } |
| 59 | |
| 60 | /* Read-modify-write a register in a PRM module. Caller must lock */ |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 61 | u32 omap2_prm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx) |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 62 | { |
| 63 | u32 v; |
| 64 | |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 65 | v = omap2_prm_read_mod_reg(module, idx); |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 66 | v &= ~mask; |
| 67 | v |= bits; |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 68 | omap2_prm_write_mod_reg(v, module, idx); |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 69 | |
| 70 | return v; |
| 71 | } |
| 72 | |
| 73 | /* Read a PRM register, AND it, and shift the result down to bit 0 */ |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 74 | u32 omap2_prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask) |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 75 | { |
| 76 | u32 v; |
| 77 | |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 78 | v = omap2_prm_read_mod_reg(domain, idx); |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 79 | v &= mask; |
| 80 | v >>= __ffs(mask); |
| 81 | |
| 82 | return v; |
| 83 | } |
| 84 | |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 85 | u32 omap2_prm_set_mod_reg_bits(u32 bits, s16 module, s16 idx) |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 86 | { |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 87 | return omap2_prm_rmw_mod_reg_bits(bits, bits, module, idx); |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 88 | } |
| 89 | |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 90 | u32 omap2_prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx) |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 91 | { |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 92 | return omap2_prm_rmw_mod_reg_bits(bits, 0x0, module, idx); |
Paul Walmsley | 59fb659 | 2010-12-21 15:30:55 -0700 | [diff] [blame] | 93 | } |
| 94 | |
| 95 | |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 96 | /** |
| 97 | * omap2_prm_is_hardreset_asserted - read the HW reset line state of |
| 98 | * submodules contained in the hwmod module |
| 99 | * @prm_mod: PRM submodule base (e.g. CORE_MOD) |
| 100 | * @shift: register bit shift corresponding to the reset line to check |
| 101 | * |
| 102 | * Returns 1 if the (sub)module hardreset line is currently asserted, |
| 103 | * 0 if the (sub)module hardreset line is not currently asserted, or |
| 104 | * -EINVAL if called while running on a non-OMAP2/3 chip. |
| 105 | */ |
| 106 | int omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift) |
| 107 | { |
| 108 | if (!(cpu_is_omap24xx() || cpu_is_omap34xx())) |
| 109 | return -EINVAL; |
| 110 | |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 111 | return omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL, |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 112 | (1 << shift)); |
| 113 | } |
| 114 | |
| 115 | /** |
| 116 | * omap2_prm_assert_hardreset - assert the HW reset line of a submodule |
| 117 | * @prm_mod: PRM submodule base (e.g. CORE_MOD) |
| 118 | * @shift: register bit shift corresponding to the reset line to assert |
| 119 | * |
| 120 | * Some IPs like dsp or iva contain processors that require an HW |
| 121 | * reset line to be asserted / deasserted in order to fully enable the |
| 122 | * IP. These modules may have multiple hard-reset lines that reset |
| 123 | * different 'submodules' inside the IP block. This function will |
| 124 | * place the submodule into reset. Returns 0 upon success or -EINVAL |
| 125 | * upon an argument error. |
| 126 | */ |
| 127 | int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift) |
| 128 | { |
| 129 | u32 mask; |
| 130 | |
| 131 | if (!(cpu_is_omap24xx() || cpu_is_omap34xx())) |
| 132 | return -EINVAL; |
| 133 | |
| 134 | mask = 1 << shift; |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 135 | omap2_prm_rmw_mod_reg_bits(mask, mask, prm_mod, OMAP2_RM_RSTCTRL); |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 136 | |
| 137 | return 0; |
| 138 | } |
| 139 | |
| 140 | /** |
| 141 | * omap2_prm_deassert_hardreset - deassert a submodule hardreset line and wait |
| 142 | * @prm_mod: PRM submodule base (e.g. CORE_MOD) |
omar ramirez | cc1226e | 2011-03-04 13:32:44 -0700 | [diff] [blame] | 143 | * @rst_shift: register bit shift corresponding to the reset line to deassert |
| 144 | * @st_shift: register bit shift for the status of the deasserted submodule |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 145 | * |
| 146 | * Some IPs like dsp or iva contain processors that require an HW |
| 147 | * reset line to be asserted / deasserted in order to fully enable the |
| 148 | * IP. These modules may have multiple hard-reset lines that reset |
| 149 | * different 'submodules' inside the IP block. This function will |
| 150 | * take the submodule out of reset and wait until the PRCM indicates |
| 151 | * that the reset has completed before returning. Returns 0 upon success or |
| 152 | * -EINVAL upon an argument error, -EEXIST if the submodule was already out |
| 153 | * of reset, or -EBUSY if the submodule did not exit reset promptly. |
| 154 | */ |
omar ramirez | cc1226e | 2011-03-04 13:32:44 -0700 | [diff] [blame] | 155 | int omap2_prm_deassert_hardreset(s16 prm_mod, u8 rst_shift, u8 st_shift) |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 156 | { |
omar ramirez | cc1226e | 2011-03-04 13:32:44 -0700 | [diff] [blame] | 157 | u32 rst, st; |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 158 | int c; |
| 159 | |
| 160 | if (!(cpu_is_omap24xx() || cpu_is_omap34xx())) |
| 161 | return -EINVAL; |
| 162 | |
omar ramirez | cc1226e | 2011-03-04 13:32:44 -0700 | [diff] [blame] | 163 | rst = 1 << rst_shift; |
| 164 | st = 1 << st_shift; |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 165 | |
| 166 | /* Check the current status to avoid de-asserting the line twice */ |
omar ramirez | cc1226e | 2011-03-04 13:32:44 -0700 | [diff] [blame] | 167 | if (omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL, rst) == 0) |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 168 | return -EEXIST; |
| 169 | |
| 170 | /* Clear the reset status by writing 1 to the status bit */ |
omar ramirez | cc1226e | 2011-03-04 13:32:44 -0700 | [diff] [blame] | 171 | omap2_prm_rmw_mod_reg_bits(0xffffffff, st, prm_mod, OMAP2_RM_RSTST); |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 172 | /* de-assert the reset control line */ |
omar ramirez | cc1226e | 2011-03-04 13:32:44 -0700 | [diff] [blame] | 173 | omap2_prm_rmw_mod_reg_bits(rst, 0, prm_mod, OMAP2_RM_RSTCTRL); |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 174 | /* wait the status to be set */ |
Paul Walmsley | c4d7e58 | 2010-12-21 21:05:14 -0700 | [diff] [blame] | 175 | omap_test_timeout(omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTST, |
omar ramirez | cc1226e | 2011-03-04 13:32:44 -0700 | [diff] [blame] | 176 | st), |
Paul Walmsley | cf21405 | 2010-09-21 10:34:10 -0600 | [diff] [blame] | 177 | MAX_MODULE_HARDRESET_WAIT, c); |
| 178 | |
| 179 | return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0; |
| 180 | } |
Kevin Hilman | 58aaa59 | 2011-03-28 10:52:04 -0700 | [diff] [blame] | 181 | |
| 182 | /* PRM VP */ |
| 183 | |
| 184 | /* |
| 185 | * struct omap3_vp - OMAP3 VP register access description. |
| 186 | * @tranxdone_status: VP_TRANXDONE_ST bitmask in PRM_IRQSTATUS_MPU reg |
| 187 | */ |
| 188 | struct omap3_vp { |
| 189 | u32 tranxdone_status; |
| 190 | }; |
| 191 | |
Kevin Hilman | 4bb73ad | 2011-03-28 10:25:12 -0700 | [diff] [blame] | 192 | static struct omap3_vp omap3_vp[] = { |
Kevin Hilman | 58aaa59 | 2011-03-28 10:52:04 -0700 | [diff] [blame] | 193 | [OMAP3_VP_VDD_MPU_ID] = { |
| 194 | .tranxdone_status = OMAP3430_VP1_TRANXDONE_ST_MASK, |
| 195 | }, |
| 196 | [OMAP3_VP_VDD_CORE_ID] = { |
| 197 | .tranxdone_status = OMAP3430_VP2_TRANXDONE_ST_MASK, |
| 198 | }, |
| 199 | }; |
| 200 | |
/*
 * Number of VP instances described in omap3_vp[].  Note: the previous
 * definition carried a trailing semicolon inside the macro body, which
 * would expand into a syntax error in any expression context.
 */
#define MAX_VP_ID ARRAY_SIZE(omap3_vp)
| 202 | |
| 203 | u32 omap3_prm_vp_check_txdone(u8 vp_id) |
| 204 | { |
| 205 | struct omap3_vp *vp = &omap3_vp[vp_id]; |
| 206 | u32 irqstatus; |
| 207 | |
| 208 | irqstatus = omap2_prm_read_mod_reg(OCP_MOD, |
| 209 | OMAP3_PRM_IRQSTATUS_MPU_OFFSET); |
| 210 | return irqstatus & vp->tranxdone_status; |
| 211 | } |
| 212 | |
| 213 | void omap3_prm_vp_clear_txdone(u8 vp_id) |
| 214 | { |
| 215 | struct omap3_vp *vp = &omap3_vp[vp_id]; |
| 216 | |
| 217 | omap2_prm_write_mod_reg(vp->tranxdone_status, |
| 218 | OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); |
| 219 | } |
Kevin Hilman | 4bb73ad | 2011-03-28 10:25:12 -0700 | [diff] [blame] | 220 | |
| 221 | u32 omap3_prm_vcvp_read(u8 offset) |
| 222 | { |
| 223 | return omap2_prm_read_mod_reg(OMAP3430_GR_MOD, offset); |
| 224 | } |
| 225 | |
| 226 | void omap3_prm_vcvp_write(u32 val, u8 offset) |
| 227 | { |
| 228 | omap2_prm_write_mod_reg(val, OMAP3430_GR_MOD, offset); |
| 229 | } |
| 230 | |
| 231 | u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset) |
| 232 | { |
| 233 | return omap2_prm_rmw_mod_reg_bits(mask, bits, OMAP3430_GR_MOD, offset); |
| 234 | } |
Paul Walmsley | 26c98c5 | 2011-12-16 14:36:58 -0700 | [diff] [blame] | 235 | |
| 236 | /** |
| 237 | * omap3xxx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events |
| 238 | * @events: ptr to a u32, preallocated by caller |
| 239 | * |
| 240 | * Read PRM_IRQSTATUS_MPU bits, AND'ed with the currently-enabled PRM |
| 241 | * MPU IRQs, and store the result into the u32 pointed to by @events. |
| 242 | * No return value. |
| 243 | */ |
| 244 | void omap3xxx_prm_read_pending_irqs(unsigned long *events) |
| 245 | { |
| 246 | u32 mask, st; |
| 247 | |
| 248 | /* XXX Can the mask read be avoided (e.g., can it come from RAM?) */ |
| 249 | mask = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); |
| 250 | st = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); |
| 251 | |
| 252 | events[0] = mask & st; |
| 253 | } |
| 254 | |
| 255 | /** |
| 256 | * omap3xxx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete |
| 257 | * |
| 258 | * Force any buffered writes to the PRM IP block to complete. Needed |
| 259 | * by the PRM IRQ handler, which reads and writes directly to the IP |
| 260 | * block, to avoid race conditions after acknowledging or clearing IRQ |
| 261 | * bits. No return value. |
| 262 | */ |
| 263 | void omap3xxx_prm_ocp_barrier(void) |
| 264 | { |
| 265 | omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET); |
| 266 | } |
Tero Kristo | 91285b6 | 2011-12-16 14:36:58 -0700 | [diff] [blame] | 267 | |
| 268 | /** |
| 269 | * omap3xxx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU reg |
| 270 | * @saved_mask: ptr to a u32 array to save IRQENABLE bits |
| 271 | * |
| 272 | * Save the PRM_IRQENABLE_MPU register to @saved_mask. @saved_mask |
| 273 | * must be allocated by the caller. Intended to be used in the PRM |
| 274 | * interrupt handler suspend callback. The OCP barrier is needed to |
| 275 | * ensure the write to disable PRM interrupts reaches the PRM before |
| 276 | * returning; otherwise, spurious interrupts might occur. No return |
| 277 | * value. |
| 278 | */ |
| 279 | void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask) |
| 280 | { |
| 281 | saved_mask[0] = omap2_prm_read_mod_reg(OCP_MOD, |
| 282 | OMAP3_PRM_IRQENABLE_MPU_OFFSET); |
| 283 | omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); |
| 284 | |
| 285 | /* OCP barrier */ |
| 286 | omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET); |
| 287 | } |
| 288 | |
| 289 | /** |
| 290 | * omap3xxx_prm_restore_irqen - set PRM_IRQENABLE_MPU register from args |
| 291 | * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously |
| 292 | * |
| 293 | * Restore the PRM_IRQENABLE_MPU register from @saved_mask. Intended |
| 294 | * to be used in the PRM interrupt handler resume callback to restore |
| 295 | * values saved by omap3xxx_prm_save_and_clear_irqen(). No OCP |
| 296 | * barrier should be needed here; any pending PRM interrupts will fire |
| 297 | * once the writes reach the PRM. No return value. |
| 298 | */ |
| 299 | void omap3xxx_prm_restore_irqen(u32 *saved_mask) |
| 300 | { |
| 301 | omap2_prm_write_mod_reg(saved_mask[0], OCP_MOD, |
| 302 | OMAP3_PRM_IRQENABLE_MPU_OFFSET); |
| 303 | } |
Tero Kristo | 22f5137 | 2011-12-16 14:36:59 -0700 | [diff] [blame] | 304 | |
Vishwanath BS | 09659fa | 2012-06-22 08:40:02 -0600 | [diff] [blame] | 305 | /** |
| 306 | * omap3xxx_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain |
| 307 | * |
| 308 | * Clear any previously-latched I/O wakeup events and ensure that the |
| 309 | * I/O wakeup gates are aligned with the current mux settings. Works |
| 310 | * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then |
| 311 | * deasserting WUCLKIN and clearing the ST_IO_CHAIN WKST bit. No |
| 312 | * return value. |
| 313 | */ |
| 314 | void omap3xxx_prm_reconfigure_io_chain(void) |
| 315 | { |
| 316 | int i = 0; |
| 317 | |
| 318 | omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD, |
| 319 | PM_WKEN); |
| 320 | |
| 321 | omap_test_timeout(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKST) & |
| 322 | OMAP3430_ST_IO_CHAIN_MASK, |
| 323 | MAX_IOPAD_LATCH_TIME, i); |
| 324 | if (i == MAX_IOPAD_LATCH_TIME) |
| 325 | pr_warn("PRM: I/O chain clock line assertion timed out\n"); |
| 326 | |
| 327 | omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD, |
| 328 | PM_WKEN); |
| 329 | |
| 330 | omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK, WKUP_MOD, |
| 331 | PM_WKST); |
| 332 | |
| 333 | omap2_prm_read_mod_reg(WKUP_MOD, PM_WKST); |
| 334 | } |
| 335 | |
Tero Kristo | 8a680ea | 2012-06-22 08:40:03 -0600 | [diff] [blame] | 336 | /** |
| 337 | * omap3xxx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches |
| 338 | * |
| 339 | * Activates the I/O wakeup event latches and allows events logged by |
| 340 | * those latches to signal a wakeup event to the PRCM. For I/O |
| 341 | * wakeups to occur, WAKEUPENABLE bits must be set in the pad mux |
| 342 | * registers, and omap3xxx_prm_reconfigure_io_chain() must be called. |
| 343 | * No return value. |
| 344 | */ |
| 345 | static void __init omap3xxx_prm_enable_io_wakeup(void) |
| 346 | { |
| 347 | if (omap3_has_io_wakeup()) |
| 348 | omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, |
| 349 | PM_WKEN); |
| 350 | } |
| 351 | |
Tero Kristo | 22f5137 | 2011-12-16 14:36:59 -0700 | [diff] [blame] | 352 | static int __init omap3xxx_prcm_init(void) |
| 353 | { |
Kevin Hilman | d660e9b | 2012-06-29 07:03:34 -0700 | [diff] [blame] | 354 | int ret = 0; |
| 355 | |
Kevin Hilman | 99b59df | 2012-04-27 16:05:51 -0700 | [diff] [blame] | 356 | if (cpu_is_omap34xx()) { |
Tero Kristo | 8a680ea | 2012-06-22 08:40:03 -0600 | [diff] [blame] | 357 | omap3xxx_prm_enable_io_wakeup(); |
Kevin Hilman | d660e9b | 2012-06-29 07:03:34 -0700 | [diff] [blame] | 358 | ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); |
| 359 | if (!ret) |
| 360 | irq_set_status_flags(omap_prcm_event_to_irq("io"), |
| 361 | IRQ_NOAUTOEN); |
Kevin Hilman | 99b59df | 2012-04-27 16:05:51 -0700 | [diff] [blame] | 362 | } |
| 363 | |
Kevin Hilman | d660e9b | 2012-06-29 07:03:34 -0700 | [diff] [blame] | 364 | return ret; |
Tero Kristo | 22f5137 | 2011-12-16 14:36:59 -0700 | [diff] [blame] | 365 | } |
| 366 | subsys_initcall(omap3xxx_prcm_init); |