/*
 * Copyright (C) 2012 by Alan Stern
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

/* This file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/* Set a bit in the USBCMD register */
static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit)
{
	ehci->command |= bit;
	ehci_writel(ehci, ehci->command, &ehci->regs->command);

	/* unblock posted write */
	ehci_readl(ehci, &ehci->regs->command);
}

/* Clear a bit in the USBCMD register */
static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit)
{
	ehci->command &= ~bit;
	ehci_writel(ehci, ehci->command, &ehci->regs->command);

	/* unblock posted write */
	ehci_readl(ehci, &ehci->regs->command);
}
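
/*
 * Note: the read-back in the helpers above is the standard idiom for
 * flushing a posted write on PCI: the read cannot complete until the
 * preceding write has actually reached the controller, so by the time
 * ehci_readl() returns, the USBCMD update is known to have taken effect.
 */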

/*-------------------------------------------------------------------------*/

/*
 * EHCI timer support...  Now using hrtimers.
 *
 * Lots of different events are triggered from ehci->hrtimer.  Whenever
 * the timer routine runs, it checks each possible event; events that are
 * currently enabled and whose expiration time has passed get handled.
 * The set of enabled events is stored as a collection of bitflags in
 * ehci->enabled_hrtimer_events, and they are numbered in order of
 * increasing delay values (ranging between 1 ms and 100 ms).
 *
 * Rather than implementing a sorted list or tree of all pending events,
 * we keep track only of the lowest-numbered pending event, in
 * ehci->next_hrtimer_event.  Whenever ehci->hrtimer gets restarted, its
 * expiration time is set to the timeout value for this event.
 *
 * As a result, events might not get handled right away; the actual delay
 * could be anywhere up to twice the requested delay.  This doesn't
 * matter, because none of the events are especially time-critical.  The
 * ones that matter most all have a delay of 1 ms, so they will be
 * handled after 2 ms at most, which is okay.  In addition to this, we
 * allow for an expiration range of 1 ms.
 */
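
/*
 * Worked example of the scheme above (for illustration only): suppose
 * EHCI_HRTIMER_IO_WATCHDOG (100 ms) is pending when EHCI_HRTIMER_POLL_ASS
 * (1 ms) becomes enabled.  POLL_ASS has the lower event number, so
 * ehci->next_hrtimer_event is set to it and the hrtimer is re-armed to
 * expire in about 1 ms.  When ehci_hrtimer_func() then runs, POLL_ASS has
 * expired and gets handled; IO_WATCHDOG has not, so it is re-enabled with
 * its previously stored deadline (resched == false) and the timer is
 * restarted for it.
 */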

/*
 * Delay lengths for the hrtimer event types.
 * Keep this list sorted by delay length, in the same order as
 * the event types indexed by enum ehci_hrtimer_event in ehci.h.
 */
static unsigned event_delays_ns[] = {
	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_ASS */
	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_PSS */
	1 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_POLL_DEAD */
	1125 * NSEC_PER_USEC,	/* EHCI_HRTIMER_UNLINK_INTR */
	2 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_FREE_ITDS */
	5 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_START_UNLINK_INTR */
	6 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_ASYNC_UNLINKS */
	10 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_IAA_WATCHDOG */
	10 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_DISABLE_PERIODIC */
	15 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_DISABLE_ASYNC */
	100 * NSEC_PER_MSEC,	/* EHCI_HRTIMER_IO_WATCHDOG */
};

/* Enable a pending hrtimer event */
static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event,
		bool resched)
{
	ktime_t *timeout = &ehci->hr_timeouts[event];

	if (resched)
		*timeout = ktime_add(ktime_get(),
				ktime_set(0, event_delays_ns[event]));
	ehci->enabled_hrtimer_events |= (1 << event);

	/* Track only the lowest-numbered pending event */
	if (event < ehci->next_hrtimer_event) {
		ehci->next_hrtimer_event = event;
		hrtimer_start_range_ns(&ehci->hrtimer, *timeout,
				NSEC_PER_MSEC, HRTIMER_MODE_ABS);
	}
}
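
/*
 * A minimal usage sketch (hypothetical caller, for illustration only);
 * real callers already run under ehci->lock:
 *
 *	spin_lock_irqsave(&ehci->lock, flags);
 *	ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
 *	spin_unlock_irqrestore(&ehci->lock, flags);
 *
 * resched == true computes a fresh deadline from event_delays_ns[];
 * resched == false re-arms the event using the deadline already stored
 * in ehci->hr_timeouts[], as the timer routine does for unexpired events.
 */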


/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
static void ehci_poll_ASS(struct ehci_hcd *ehci)
{
	unsigned actual, want;

	/* Don't enable anything if the controller isn't running (e.g., died) */
	if (ehci->rh_state != EHCI_RH_RUNNING)
		return;

	want = (ehci->command & CMD_ASE) ? STS_ASS : 0;
	actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS;

	if (want != actual) {

		/* Poll again later, but give up after about 2-4 ms */
		if (ehci->ASS_poll_count++ < 2) {
			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
			return;
		}
		ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
				want, actual);
	}
	ehci->ASS_poll_count = 0;

	/* The status is up-to-date; restart or stop the schedule as needed */
	if (want == 0) {	/* Stopped */
		if (ehci->async_count > 0)
			ehci_set_command_bit(ehci, CMD_ASE);

	} else {		/* Running */
		if (ehci->async_count == 0) {

			/* Turn off the schedule after a while */
			ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC,
					true);
		}
	}
}

/* Turn off the async schedule after a brief delay */
static void ehci_disable_ASE(struct ehci_hcd *ehci)
{
	ehci_clear_command_bit(ehci, CMD_ASE);
}
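
/*
 * Note on the handshake above: per the EHCI spec, software changes
 * CMD_ASE and the controller later brings STS_ASS into agreement.
 * ehci_poll_ASS() is therefore invoked, directly or via the 1 ms
 * EHCI_HRTIMER_POLL_ASS event, until the two bits match; only then does
 * it actually start the schedule or arm its deferred shutdown.  The
 * periodic schedule below uses the same pattern with CMD_PSE/STS_PSS.
 */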


/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
static void ehci_poll_PSS(struct ehci_hcd *ehci)
{
	unsigned actual, want;

	/* Don't do anything if the controller isn't running (e.g., died) */
	if (ehci->rh_state != EHCI_RH_RUNNING)
		return;

	want = (ehci->command & CMD_PSE) ? STS_PSS : 0;
	actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS;

	if (want != actual) {

		/* Poll again later, but give up after about 2-4 ms */
		if (ehci->PSS_poll_count++ < 2) {
			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
			return;
		}
		ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
				want, actual);
	}
	ehci->PSS_poll_count = 0;

	/* The status is up-to-date; restart or stop the schedule as needed */
	if (want == 0) {	/* Stopped */
		if (ehci->periodic_count > 0)
			ehci_set_command_bit(ehci, CMD_PSE);

	} else {		/* Running */
		if (ehci->periodic_count == 0) {

			/* Turn off the schedule after a while */
			ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC,
					true);
		}
	}
}

/* Turn off the periodic schedule after a brief delay */
static void ehci_disable_PSE(struct ehci_hcd *ehci)
{
	ehci_clear_command_bit(ehci, CMD_PSE);
}


/* Poll the STS_HALT status bit; see when a dead controller stops */
static void ehci_handle_controller_death(struct ehci_hcd *ehci)
{
	if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) {

		/* Give up after a few milliseconds */
		if (ehci->died_poll_count++ < 5) {
			/* Try again later */
			ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true);
			return;
		}
		ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n");
	}

	/* Clean up the mess */
	ehci->rh_state = EHCI_RH_HALTED;
	ehci_writel(ehci, 0, &ehci->regs->configured_flag);
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
	ehci_work(ehci);
	end_unlink_async(ehci);

	/* Not in process context, so don't try to reset the controller */
}

/* Start unlinking interrupt QHs that have waited long enough */
static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci)
{
	bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);

	/*
	 * Process all the QHs on the intr_unlink_wait list that were
	 * added before the current wait cycle began.  The list is in
	 * temporal order, so stop when we reach the first entry in the
	 * current cycle.  But if the root hub isn't running then
	 * process all the QHs on the list.
	 */
	while (!list_empty(&ehci->intr_unlink_wait)) {
		struct ehci_qh *qh;

		qh = list_first_entry(&ehci->intr_unlink_wait,
				struct ehci_qh, unlink_node);
		if (!stopped && (qh->unlink_cycle ==
				ehci->intr_unlink_wait_cycle))
			break;
		list_del_init(&qh->unlink_node);
		start_unlink_intr(ehci, qh);
	}

	/* Handle remaining entries later */
	if (!list_empty(&ehci->intr_unlink_wait)) {
		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
		++ehci->intr_unlink_wait_cycle;
	}
}
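
/*
 * Together with ehci_handle_intr_unlinks() below, this forms a two-stage
 * pipeline (a sketch of the intended flow, not extra logic): an idle QH
 * first sits on intr_unlink_wait for up to ~5 ms, so that a newly queued
 * URB can reuse it and avoid the unlink altogether.  Once
 * start_unlink_intr() removes it from the schedule, it moves to
 * intr_unlink, where it waits out another timer cycle to ensure the
 * hardware has truly finished with it before end_unlink_intr() runs.
 */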

/* Handle unlinked interrupt QHs once they are gone from the hardware */
static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
{
	bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);

	/*
	 * Process all the QHs on the intr_unlink list that were added
	 * before the current unlink cycle began.  The list is in
	 * temporal order, so stop when we reach the first entry in the
	 * current cycle.  But if the root hub isn't running then
	 * process all the QHs on the list.
	 */
	ehci->intr_unlinking = true;
	while (!list_empty(&ehci->intr_unlink)) {
		struct ehci_qh *qh;

		qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh,
				unlink_node);
		if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
			break;
		list_del_init(&qh->unlink_node);
		end_unlink_intr(ehci, qh);
	}

	/* Handle remaining entries later */
	if (!list_empty(&ehci->intr_unlink)) {
		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
		++ehci->intr_unlink_cycle;
	}
	ehci->intr_unlinking = false;
}


/* Start another free-iTDs/siTDs cycle */
static void start_free_itds(struct ehci_hcd *ehci)
{
	if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) {
		ehci->last_itd_to_free = list_entry(
				ehci->cached_itd_list.prev,
				struct ehci_itd, itd_list);
		ehci->last_sitd_to_free = list_entry(
				ehci->cached_sitd_list.prev,
				struct ehci_sitd, sitd_list);
		ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true);
	}
}

/* Wait for controller to stop using old iTDs and siTDs */
static void end_free_itds(struct ehci_hcd *ehci)
{
	struct ehci_itd *itd, *n;
	struct ehci_sitd *sitd, *sn;

	if (ehci->rh_state < EHCI_RH_RUNNING) {
		ehci->last_itd_to_free = NULL;
		ehci->last_sitd_to_free = NULL;
	}

	list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
		list_del(&itd->itd_list);
		dma_pool_free(ehci->itd_pool, itd, itd->itd_dma);
		if (itd == ehci->last_itd_to_free)
			break;
	}
	list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
		list_del(&sitd->sitd_list);
		dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma);
		if (sitd == ehci->last_sitd_to_free)
			break;
	}

	if (!list_empty(&ehci->cached_itd_list) ||
			!list_empty(&ehci->cached_sitd_list))
		start_free_itds(ehci);
}
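
/*
 * Note on the markers used above: start_free_itds() records the current
 * tails of the cached lists in last_itd_to_free/last_sitd_to_free when it
 * arms the 2 ms timer.  Descriptors cached after that moment may still be
 * referenced by the controller, so end_free_itds() stops at the markers
 * and simply starts another cycle for whatever remains.
 */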


/* Handle lost (or very late) IAA interrupts */
static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
{
	u32 cmd, status;

	/*
	 * Lost IAA irqs wedge things badly; seen first with a vt8235.
	 * So we need this watchdog, but must protect it against both
	 * (a) SMP races against real IAA firing and retriggering, and
	 * (b) clean HC shutdown, when IAA watchdog was pending.
	 */
	if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING)
		return;

	/*
	 * If we get here, IAA is *REALLY* late.  It's barely
	 * conceivable that the system is so busy that CMD_IAAD
	 * is still legitimately set, so let's be sure it's
	 * clear before we read STS_IAA.  (The HC should clear
	 * CMD_IAAD when it sets STS_IAA.)
	 */
	cmd = ehci_readl(ehci, &ehci->regs->command);

	/*
	 * If IAA is set here it either legitimately triggered
	 * after the watchdog timer expired (_way_ late, so we'll
	 * still count it as lost) ... or a silicon erratum:
	 * - VIA seems to set IAA without triggering the IRQ;
	 * - IAAD potentially cleared without setting IAA.
	 */
	status = ehci_readl(ehci, &ehci->regs->status);
	if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
		COUNT(ehci->stats.lost_iaa);
		ehci_writel(ehci, STS_IAA, &ehci->regs->status);
	}

	ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd);
	end_unlink_async(ehci);
}


/* Enable the I/O watchdog, if appropriate */
static void turn_on_io_watchdog(struct ehci_hcd *ehci)
{
	/* Not needed if the controller isn't running or it's already enabled */
	if (ehci->rh_state != EHCI_RH_RUNNING ||
			(ehci->enabled_hrtimer_events &
				BIT(EHCI_HRTIMER_IO_WATCHDOG)))
		return;

	/*
	 * Isochronous transfers always need the watchdog.
	 * For other sorts we use it only if the flag is set.
	 */
	if (ehci->isoc_count > 0 || (ehci->need_io_watchdog &&
			ehci->async_count + ehci->intr_count > 0))
		ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true);
}


/*
 * Handler functions for the hrtimer event types.
 * Keep this array in the same order as the event types indexed by
 * enum ehci_hrtimer_event in ehci.h.
 */
static void (*event_handlers[])(struct ehci_hcd *) = {
	ehci_poll_ASS,			/* EHCI_HRTIMER_POLL_ASS */
	ehci_poll_PSS,			/* EHCI_HRTIMER_POLL_PSS */
	ehci_handle_controller_death,	/* EHCI_HRTIMER_POLL_DEAD */
	ehci_handle_intr_unlinks,	/* EHCI_HRTIMER_UNLINK_INTR */
	end_free_itds,			/* EHCI_HRTIMER_FREE_ITDS */
	ehci_handle_start_intr_unlinks,	/* EHCI_HRTIMER_START_UNLINK_INTR */
	unlink_empty_async,		/* EHCI_HRTIMER_ASYNC_UNLINKS */
	ehci_iaa_watchdog,		/* EHCI_HRTIMER_IAA_WATCHDOG */
	ehci_disable_PSE,		/* EHCI_HRTIMER_DISABLE_PERIODIC */
	ehci_disable_ASE,		/* EHCI_HRTIMER_DISABLE_ASYNC */
	ehci_work,			/* EHCI_HRTIMER_IO_WATCHDOG */
};

static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t)
{
	struct ehci_hcd *ehci = container_of(t, struct ehci_hcd, hrtimer);
	ktime_t now;
	unsigned long events;
	unsigned long flags;
	unsigned e;

	spin_lock_irqsave(&ehci->lock, flags);

	events = ehci->enabled_hrtimer_events;
	ehci->enabled_hrtimer_events = 0;
	ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;

	/*
	 * Check each pending event.  If its time has expired, handle
	 * the event; otherwise re-enable it.
	 */
	now = ktime_get();
	for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) {
		if (now.tv64 >= ehci->hr_timeouts[e].tv64)
			event_handlers[e](ehci);
		else
			ehci_enable_event(ehci, e, false);
	}

	spin_unlock_irqrestore(&ehci->lock, flags);
	return HRTIMER_NORESTART;
}
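
/*
 * A minimal sketch of how this timer is expected to be wired up during
 * controller initialization (the actual setup lives in ehci-hcd.c, not
 * in this file):
 *
 *	hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *	ehci->hrtimer.function = ehci_hrtimer_func;
 *	ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
 */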