/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/wcd9xxx/core.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd9xxx/wcd9310_registers.h>
#include <linux/interrupt.h>

#include <mach/cpuidle.h>

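/*
 * Interrupt numbers are flat across the chip's byte-wide INTR
 * registers; BIT_BYTE() gives the register offset holding a given
 * interrupt and BYTE_BIT_MASK() the bit within that register.
 */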
#define BYTE_BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_BYTE))
#define BIT_BYTE(nr)		((nr) / BITS_PER_BYTE)

struct wcd9xxx_irq {
	bool level;
};

static struct wcd9xxx_irq wcd9xxx_irqs[TABLA_NUM_IRQS] = {
	[0] = { .level = 1},
	/* All other wcd9xxx interrupts are edge triggered */
};

static inline int irq_to_wcd9xxx_irq(struct wcd9xxx *wcd9xxx, int irq)
{
	return irq - wcd9xxx->irq_base;
}

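/*
 * Mask updates are batched bus-lock style: irq_bus_lock takes
 * irq_lock, the enable/disable callbacks below only touch the cached
 * mask, and irq_bus_sync_unlock writes any changed mask bytes back to
 * the hardware before dropping the lock.
 */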
static void wcd9xxx_irq_lock(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	mutex_lock(&wcd9xxx->irq_lock);
}

static void wcd9xxx_irq_sync_unlock(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	int i;

	for (i = 0; i < ARRAY_SIZE(wcd9xxx->irq_masks_cur); i++) {
		/* If there's been a change in the mask, write it back
		 * to the hardware.
		 */
		if (wcd9xxx->irq_masks_cur[i] != wcd9xxx->irq_masks_cache[i]) {
			wcd9xxx->irq_masks_cache[i] = wcd9xxx->irq_masks_cur[i];
			wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_MASK0 + i,
					  wcd9xxx->irq_masks_cur[i]);
		}
	}

	mutex_unlock(&wcd9xxx->irq_lock);
}

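/*
 * A cleared bit in irq_masks_cur unmasks the interrupt, a set bit
 * masks it; neither callback touches the hardware directly, the
 * cached mask is flushed in wcd9xxx_irq_sync_unlock() above.
 */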
static void wcd9xxx_irq_enable(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = irq_to_wcd9xxx_irq(wcd9xxx, data->irq);
	wcd9xxx->irq_masks_cur[BIT_BYTE(wcd9xxx_irq)] &=
		~(BYTE_BIT_MASK(wcd9xxx_irq));
}

static void wcd9xxx_irq_disable(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = irq_to_wcd9xxx_irq(wcd9xxx, data->irq);
	wcd9xxx->irq_masks_cur[BIT_BYTE(wcd9xxx_irq)] |=
		BYTE_BIT_MASK(wcd9xxx_irq);
}

static struct irq_chip wcd9xxx_irq_chip = {
	.name = "wcd9xxx",
	.irq_bus_lock = wcd9xxx_irq_lock,
	.irq_bus_sync_unlock = wcd9xxx_irq_sync_unlock,
	.irq_disable = wcd9xxx_irq_disable,
	.irq_enable = wcd9xxx_irq_enable,
};

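/*
 * Compare-and-exchange on the PM state, serialized by pm_lock rather
 * than an atomic: pm_state is set to @n only if it currently equals
 * @o, and the previous state is returned so the caller can tell
 * whether the exchange took place.
 */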
enum wcd9xxx_pm_state wcd9xxx_pm_cmpxchg(struct wcd9xxx *wcd9xxx,
					 enum wcd9xxx_pm_state o,
					 enum wcd9xxx_pm_state n)
{
	enum wcd9xxx_pm_state old;
	mutex_lock(&wcd9xxx->pm_lock);
	old = wcd9xxx->pm_state;
	if (old == o)
		wcd9xxx->pm_state = n;
	mutex_unlock(&wcd9xxx->pm_lock);
	return old;
}
EXPORT_SYMBOL_GPL(wcd9xxx_pm_cmpxchg);

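/*
 * Take a reference that holds off system sleep while an interrupt is
 * serviced.  The first holder votes for low CPU-idle latency through
 * pm_qos; the call then waits up to five seconds for the PM state to
 * leave WCD9XXX_PM_ASLEEP.  Returns false, with the reference already
 * dropped, if the system fails to resume in time.
 */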
bool wcd9xxx_lock_sleep(struct wcd9xxx *wcd9xxx)
{
	enum wcd9xxx_pm_state os;

	/* wcd9xxx_{lock,unlock}_sleep are called mostly from
	 * wcd9xxx_irq_thread and its subroutines.  btn0_lpress_fn,
	 * however, is not a subroutine of wcd9xxx_irq_thread and can
	 * race with it, so wlock_holders must be protected by the
	 * mutex.
	 */
	mutex_lock(&wcd9xxx->pm_lock);
	if (wcd9xxx->wlock_holders++ == 0) {
		pr_debug("%s: holding wake lock\n", __func__);
		pm_qos_update_request(&wcd9xxx->pm_qos_req,
				      msm_cpuidle_get_deep_idle_latency());
	}
	mutex_unlock(&wcd9xxx->pm_lock);
	if (!wait_event_timeout(wcd9xxx->pm_wq,
				((os = wcd9xxx_pm_cmpxchg(wcd9xxx,
							  WCD9XXX_PM_SLEEPABLE,
							  WCD9XXX_PM_AWAKE)) ==
				 WCD9XXX_PM_SLEEPABLE ||
				 (os == WCD9XXX_PM_AWAKE)),
				5 * HZ)) {
		pr_err("%s: system didn't resume within 5000ms, state %d, wlock %d\n",
		       __func__, wcd9xxx->pm_state, wcd9xxx->wlock_holders);
		WARN_ON(1);
		wcd9xxx_unlock_sleep(wcd9xxx);
		return false;
	}
	wake_up_all(&wcd9xxx->pm_wq);
	return true;
}
EXPORT_SYMBOL_GPL(wcd9xxx_lock_sleep);

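/*
 * Drop the reference taken by wcd9xxx_lock_sleep().  The last holder
 * marks the chip sleepable again and restores the default pm_qos
 * latency so deep idle is allowed.
 */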
void wcd9xxx_unlock_sleep(struct wcd9xxx *wcd9xxx)
{
	mutex_lock(&wcd9xxx->pm_lock);
	if (--wcd9xxx->wlock_holders == 0) {
		wcd9xxx->pm_state = WCD9XXX_PM_SLEEPABLE;
		pr_debug("%s: releasing wake lock\n", __func__);
		pm_qos_update_request(&wcd9xxx->pm_qos_req,
				      PM_QOS_DEFAULT_VALUE);
	}
	mutex_unlock(&wcd9xxx->pm_lock);
	wake_up_all(&wcd9xxx->pm_wq);
}
EXPORT_SYMBOL_GPL(wcd9xxx_unlock_sleep);

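/*
 * For the MBHC interrupts the status bit is cleared before the nested
 * handler runs, presumably so that a new edge arriving while the
 * handler executes is not lost; all other interrupts are handled
 * first and cleared afterwards.  On I2C the interrupt mode register
 * is rewritten after each clear.
 */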
static void wcd9xxx_irq_dispatch(struct wcd9xxx *wcd9xxx, int irqbit)
{
	if ((irqbit <= TABLA_IRQ_MBHC_INSERTION) &&
	    (irqbit >= TABLA_IRQ_MBHC_REMOVAL)) {
		wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_CLEAR0 +
				  BIT_BYTE(irqbit), BYTE_BIT_MASK(irqbit));
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_MODE, 0x02);
		handle_nested_irq(wcd9xxx->irq_base + irqbit);
	} else {
		handle_nested_irq(wcd9xxx->irq_base + irqbit);
		wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_CLEAR0 +
				  BIT_BYTE(irqbit), BYTE_BIT_MASK(irqbit));
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_MODE, 0x02);
	}
}

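/*
 * Primary threaded handler for the chip's single irq line: hold off
 * system sleep, read all status registers in one bulk read, drop the
 * masked sources, then dispatch the surviving bits with SLIMBUS
 * first, the MBHC interrupts in state-machine order, and everything
 * else in register order.
 */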
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	struct wcd9xxx *wcd9xxx = data;
	u8 status[WCD9XXX_NUM_IRQ_REGS];
	int i;

	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx) == false)) {
		dev_err(wcd9xxx->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}
	ret = wcd9xxx_bulk_read(wcd9xxx, TABLA_A_INTR_STATUS0,
				WCD9XXX_NUM_IRQ_REGS, status);
	if (ret < 0) {
		dev_err(wcd9xxx->dev, "Failed to read interrupt status: %d\n",
			ret);
		wcd9xxx_unlock_sleep(wcd9xxx);
		return IRQ_NONE;
	}
	/* Apply masking */
	for (i = 0; i < WCD9XXX_NUM_IRQ_REGS; i++)
		status[i] &= ~wcd9xxx->irq_masks_cur[i];

	/* Find out which interrupts were triggered and call their
	 * handler functions.
	 */
	if (status[BIT_BYTE(TABLA_IRQ_SLIMBUS)] &
	    BYTE_BIT_MASK(TABLA_IRQ_SLIMBUS))
		wcd9xxx_irq_dispatch(wcd9xxx, TABLA_IRQ_SLIMBUS);

	/* The codec has only one hardware irq line, shared by all of
	 * its internal interrupt sources, so the master irq handler
	 * may dispatch multiple nested irq handlers out of order.
	 * Dispatch the MBHC interrupts in the order the MBHC state
	 * machine expects.
	 */
	for (i = TABLA_IRQ_MBHC_INSERTION; i >= TABLA_IRQ_MBHC_REMOVAL; i--) {
		if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
			wcd9xxx_irq_dispatch(wcd9xxx, i);
	}
	for (i = TABLA_IRQ_BG_PRECHARGE; i < TABLA_NUM_IRQS; i++) {
		if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
			wcd9xxx_irq_dispatch(wcd9xxx, i);
	}
	wcd9xxx_unlock_sleep(wcd9xxx);

	return IRQ_HANDLED;
}

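/*
 * Register the chip-level interrupt demultiplexer.  Each of the
 * TABLA_NUM_IRQS sources becomes a nested interrupt at
 * wcd9xxx->irq_base + n, masked by default; the parent irq is a
 * level-high threaded handler that is also armed as a wakeup source.
 *
 * A client claims a source with a threaded handler, along the lines
 * of the sketch below (illustrative only; tabla_hs_insert_irq is a
 * hypothetical handler in the codec driver):
 *
 *	ret = request_threaded_irq(wcd9xxx->irq_base +
 *				   TABLA_IRQ_MBHC_INSERTION,
 *				   NULL, tabla_hs_insert_irq,
 *				   IRQF_TRIGGER_RISING,
 *				   "Headset insert", tabla);
 */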
int wcd9xxx_irq_init(struct wcd9xxx *wcd9xxx)
{
	int ret;
	unsigned int i, cur_irq;

	mutex_init(&wcd9xxx->irq_lock);

	if (!wcd9xxx->irq) {
		dev_warn(wcd9xxx->dev,
			 "No interrupt specified, no interrupts\n");
		wcd9xxx->irq_base = 0;
		return 0;
	}

	if (!wcd9xxx->irq_base) {
		dev_err(wcd9xxx->dev,
			"No interrupt base specified, no interrupts\n");
		return 0;
	}
	/* Mask the individual interrupt sources */
	for (i = 0, cur_irq = wcd9xxx->irq_base; i < TABLA_NUM_IRQS; i++,
	     cur_irq++) {

		irq_set_chip_data(cur_irq, wcd9xxx);

		if (wcd9xxx_irqs[i].level)
			irq_set_chip_and_handler(cur_irq, &wcd9xxx_irq_chip,
						 handle_level_irq);
		else
			irq_set_chip_and_handler(cur_irq, &wcd9xxx_irq_chip,
						 handle_edge_irq);

		irq_set_nested_thread(cur_irq, 1);

		/* ARM needs us to explicitly flag the IRQ as valid
		 * and will set it noprobe when we do so.
		 */
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		set_irq_noprobe(cur_irq);
#endif

		wcd9xxx->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		wcd9xxx->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		wcd9xxx->irq_level[BIT_BYTE(i)] |= wcd9xxx_irqs[i].level <<
			(i % BITS_PER_BYTE);
	}
	for (i = 0; i < WCD9XXX_NUM_IRQ_REGS; i++) {
		/* Initialize interrupt mask and level registers */
		wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_LEVEL0 + i,
				  wcd9xxx->irq_level[i]);
		wcd9xxx_reg_write(wcd9xxx, TABLA_A_INTR_MASK0 + i,
				  wcd9xxx->irq_masks_cur[i]);
	}

	ret = request_threaded_irq(wcd9xxx->irq, NULL, wcd9xxx_irq_thread,
				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				   "wcd9xxx", wcd9xxx);
	if (ret != 0) {
		dev_err(wcd9xxx->dev, "Failed to request IRQ %d: %d\n",
			wcd9xxx->irq, ret);
	} else {
		ret = enable_irq_wake(wcd9xxx->irq);
		if (ret == 0) {
			ret = device_init_wakeup(wcd9xxx->dev, 1);
			if (ret) {
				dev_err(wcd9xxx->dev,
					"Failed to init device wakeup: %d\n",
					ret);
				disable_irq_wake(wcd9xxx->irq);
			}
		} else {
			dev_err(wcd9xxx->dev,
				"Failed to set wake interrupt on IRQ %d: %d\n",
				wcd9xxx->irq, ret);
		}
		if (ret)
			free_irq(wcd9xxx->irq, wcd9xxx);
	}

	if (ret)
		mutex_destroy(&wcd9xxx->irq_lock);

	return ret;
}

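/*
 * Undo wcd9xxx_irq_init(): disarm the wakeup source, free the parent
 * irq, and destroy the lock.
 */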
void wcd9xxx_irq_exit(struct wcd9xxx *wcd9xxx)
{
	if (wcd9xxx->irq) {
		disable_irq_wake(wcd9xxx->irq);
		free_irq(wcd9xxx->irq, wcd9xxx);
		device_init_wakeup(wcd9xxx->dev, 0);
	}
	mutex_destroy(&wcd9xxx->irq_lock);
}