/*
 * Copyright (c) 2000-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/usb.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>

#include "../core/hcd.h"

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>


/*-------------------------------------------------------------------------*/

/*
 * EHCI hc_driver implementation ... experimental, incomplete.
 * Based on the final 1.0 register interface specification.
 *
 * USB 2.0 shows up in upcoming www.pcmcia.org technology.
 * First was PCMCIA, like ISA; then CardBus, which is PCI.
 * Next comes "CardBay", using USB 2.0 signals.
 *
 * Contains additional contributions by Brad Hards, Rory Bolt, and others.
 * Special thanks to Intel and VIA for providing host controllers to
 * test this driver on, and Cypress (including In-System Design) for
 * providing early devices for those host controllers to talk to!
 *
 * HISTORY:
 *
 * 2004-05-10 Root hub and PCI suspend/resume support; remote wakeup. (db)
 * 2004-02-24 Replace pci_* with generic dma_* API calls (dsaxena@plexity.net)
 * 2003-12-29 Rewritten high speed iso transfer support (by Michal Sojka,
 *	<sojkam@centrum.cz>, updates by DB).
 *
 * 2002-11-29 Correct handling for hw async_next register.
 * 2002-08-06 Handling for bulk and interrupt transfers is mostly shared;
 *	only scheduling is different, no arbitrary limitations.
 * 2002-07-25 Sanity check PCI reads, mostly for better cardbus support,
 *	clean up HC run state handshaking.
 * 2002-05-24 Preliminary FS/LS interrupts, using scheduling shortcuts
 * 2002-05-11 Clear TT errors for FS/LS ctrl/bulk. Fill in some other
 *	missing pieces: enabling 64bit dma, handoff from BIOS/SMM.
 * 2002-05-07 Some error path cleanups to report better errors; wmb();
 *	use non-CVS version id; better iso bandwidth claim.
 * 2002-04-19 Control/bulk/interrupt submit no longer uses giveback() on
 *	errors in submit path. Bugfixes to interrupt scheduling/processing.
 * 2002-03-05 Initial high-speed ISO support; reduce ITD memory; shift
 *	more checking to generic hcd framework (db). Make it work with
 *	Philips EHCI; reduce PCI traffic; shorten IRQ path (Rory Bolt).
 * 2002-01-14 Minor cleanup; version synch.
 * 2002-01-08 Fix roothub handoff of FS/LS to companion controllers.
 * 2002-01-04 Control/Bulk queuing behaves.
 *
 * 2001-12-12 Initial patch version for Linux 2.5.1 kernel.
 * 2001-June Works with usb-storage and NEC EHCI on 2.4
 */

#define DRIVER_VERSION "10 Dec 2004"
#define DRIVER_AUTHOR "David Brownell"
#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"

static const char	hcd_name [] = "ehci_hcd";


#undef EHCI_VERBOSE_DEBUG
#undef EHCI_URB_TRACE

#ifdef DEBUG
#define EHCI_STATS
#endif

/* magic numbers that can affect system performance */
#define	EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
#define	EHCI_TUNE_RL_HS		4	/* nak throttle; see 4.9 */
#define	EHCI_TUNE_RL_TT		0
#define	EHCI_TUNE_MULT_HS	1	/* 1-3 transactions/uframe; 4.10.3 */
#define	EHCI_TUNE_MULT_TT	1
#define	EHCI_TUNE_FLS		2	/* (small) 256 frame schedule */

#define EHCI_IAA_JIFFIES	(HZ/100)	/* arbitrary; ~10 msec */
#define EHCI_IO_JIFFIES		(HZ/10)		/* io watchdog > irq_thresh */
#define EHCI_ASYNC_JIFFIES	(HZ/20)		/* async idle timeout */
#define EHCI_SHRINK_JIFFIES	(HZ/200)	/* async qh unlink delay */

/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh = 0;		// 0 to 6
module_param (log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");

/* initial park setting: slower than hw default */
static unsigned park = 0;
module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");

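/* the interrupts we handle: async advance, host system error, port change,
 * transaction error, and normal transfer completion
 */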
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)

/*-------------------------------------------------------------------------*/

#include "ehci.h"
#include "ehci-dbg.c"

/*-------------------------------------------------------------------------*/

/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
 * before driver shutdown. But it also seems to be caused by bugs in cardbus
 * bridge shutdown: shutting down the bridge before the devices using it.
 */
static int handshake (void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32	result;

	do {
		result = readl (ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay (1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/* force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt (struct ehci_hcd *ehci)
{
	u32	temp = readl (&ehci->regs->status);

	/* disable any irqs left enabled by previous code */
	writel (0, &ehci->regs->intr_enable);

	if ((temp & STS_HALT) != 0)
		return 0;

	temp = readl (&ehci->regs->command);
	temp &= ~CMD_RUN;
	writel (temp, &ehci->regs->command);
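	/* the HC should halt within 16 microframes (2 msec) of CMD_RUN being cleared */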
	return handshake (&ehci->regs->status, STS_HALT, STS_HALT, 16 * 125);
}

/* put TDI/ARC silicon into EHCI mode */
static void tdi_reset (struct ehci_hcd *ehci)
{
	u32 __iomem	*reg_ptr;
	u32		tmp;

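	/* offset 0x68 is the USBMODE register on these TDI/ARC-derived cores;
	 * setting its low two bits to 0x3 selects host controller mode
	 */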
	reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + 0x68);
	tmp = readl (reg_ptr);
	tmp |= 0x3;
	writel (tmp, reg_ptr);
}

/* reset a non-running (STS_HALT == 1) controller */
static int ehci_reset (struct ehci_hcd *ehci)
{
	int	retval;
	u32	command = readl (&ehci->regs->command);

	command |= CMD_RESET;
	dbg_cmd (ehci, "reset", command);
	writel (command, &ehci->regs->command);
	ehci_to_hcd(ehci)->state = HC_STATE_HALT;
	ehci->next_statechange = jiffies;
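	/* CMD_RESET self-clears once the controller finishes its reset */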
	retval = handshake (&ehci->regs->command, CMD_RESET, 0, 250 * 1000);

	if (retval)
		return retval;

	if (ehci_is_TDI(ehci))
		tdi_reset (ehci);

	return retval;
}

/* idle the controller (from running) */
static void ehci_quiesce (struct ehci_hcd *ehci)
{
	u32	temp;

#ifdef DEBUG
	if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		BUG ();
#endif

	/* wait for any schedule enables/disables to take effect */
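	/* (shifting USBCMD left by 10 lines its ASE/PSE enable bits up with
	 * the ASS/PSS status bits in USBSTS)
	 */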
	temp = readl (&ehci->regs->command) << 10;
	temp &= STS_ASS | STS_PSS;
	if (handshake (&ehci->regs->status, STS_ASS | STS_PSS,
				temp, 16 * 125) != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return;
	}

	/* then disable anything that's still active */
	temp = readl (&ehci->regs->command);
	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
	writel (temp, &ehci->regs->command);

	/* hardware can take 16 microframes to turn off ... */
	if (handshake (&ehci->regs->status, STS_ASS | STS_PSS,
				0, 16 * 125) != 0) {
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		return;
	}
}

/*-------------------------------------------------------------------------*/

static void ehci_work(struct ehci_hcd *ehci);

#include "ehci-hub.c"
#include "ehci-mem.c"
#include "ehci-q.c"
#include "ehci-sched.c"

/*-------------------------------------------------------------------------*/

static void ehci_watchdog (unsigned long param)
{
	struct ehci_hcd		*ehci = (struct ehci_hcd *) param;
	unsigned long		flags;

	spin_lock_irqsave (&ehci->lock, flags);

	/* lost IAA irqs wedge things badly; seen with a vt8235 */
	if (ehci->reclaim) {
		u32 status = readl (&ehci->regs->status);
		if (status & STS_IAA) {
			ehci_vdbg (ehci, "lost IAA\n");
			COUNT (ehci->stats.lost_iaa);
			writel (STS_IAA, &ehci->regs->status);
			ehci->reclaim_ready = 1;
		}
	}

	/* stop async processing after it's idled a bit */
	if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
		start_unlink_async (ehci, ehci->async);

	/* ehci could run by timer, without IRQs ... */
	ehci_work (ehci);

	spin_unlock_irqrestore (&ehci->lock, flags);
}

/* ehci_shutdown kicks in for silicon on any bus (not just pci, etc).
 * This forcibly disables dma and IRQs, helping kexec and other cases
 * where the next system software may expect clean state.
 */
static void
ehci_shutdown (struct usb_hcd *hcd)
{
	struct ehci_hcd	*ehci;

	ehci = hcd_to_ehci (hcd);
	(void) ehci_halt (ehci);

	/* make BIOS/etc use companion controller during reboot */
	writel (0, &ehci->regs->configured_flag);
}

static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
{
	unsigned port;

	if (!HCS_PPC (ehci->hcs_params))
		return;

	ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down");
	for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
		(void) ehci_hub_control(ehci_to_hcd(ehci),
				is_on ? SetPortFeature : ClearPortFeature,
				USB_PORT_FEAT_POWER,
				port--, NULL, 0);
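	/* give port power a moment to settle before anything uses the ports */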
	msleep(20);
}

/*-------------------------------------------------------------------------*/

/*
 * ehci_work is called from some interrupts, timers, and so on.
 * it calls driver completion functions, after dropping ehci->lock.
 */
static void ehci_work (struct ehci_hcd *ehci)
{
	timer_action_done (ehci, TIMER_IO_WATCHDOG);
	if (ehci->reclaim_ready)
		end_unlink_async (ehci);

	/* another CPU may drop ehci->lock during a schedule scan while
	 * it reports urb completions. this flag guards against bogus
	 * attempts at re-entrant schedule scanning.
	 */
	if (ehci->scanning)
		return;
	ehci->scanning = 1;
	scan_async (ehci);
	if (ehci->next_uframe != -1)
		scan_periodic (ehci);
	ehci->scanning = 0;

	/* the IO watchdog guards against hardware or driver bugs that
	 * misplace IRQs, and should let us run completely without IRQs.
	 * such lossage has been observed on both VT6202 and VT8235.
	 */
	if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
			(ehci->async->qh_next.ptr != NULL ||
			 ehci->periodic_sched != 0))
		timer_action (ehci, TIMER_IO_WATCHDOG);
}

static void ehci_stop (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);

	ehci_dbg (ehci, "stop\n");

	/* Turn off port power on all root hub ports. */
	ehci_port_power (ehci, 0);

	/* no more interrupts ... */
	del_timer_sync (&ehci->watchdog);

	spin_lock_irq(&ehci->lock);
	if (HC_IS_RUNNING (hcd->state))
		ehci_quiesce (ehci);

	ehci_reset (ehci);
	writel (0, &ehci->regs->intr_enable);
	spin_unlock_irq(&ehci->lock);

	/* let companion controllers work when we aren't */
	writel (0, &ehci->regs->configured_flag);

	remove_debug_files (ehci);

	/* root hub is shut down separately (first, when possible) */
	spin_lock_irq (&ehci->lock);
	if (ehci->async)
		ehci_work (ehci);
	spin_unlock_irq (&ehci->lock);
	ehci_mem_cleanup (ehci);

#ifdef EHCI_STATS
	ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
		ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
		ehci->stats.lost_iaa);
	ehci_dbg (ehci, "complete %ld unlink %ld\n",
		ehci->stats.complete, ehci->stats.unlink);
#endif

	dbg_status (ehci, "ehci_stop completed", readl (&ehci->regs->status));
}

/* one-time init, only for memory state */
static int ehci_init(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	u32			temp;
	int			retval;
	u32			hcc_params;

	spin_lock_init(&ehci->lock);

	init_timer(&ehci->watchdog);
	ehci->watchdog.function = ehci_watchdog;
	ehci->watchdog.data = (unsigned long) ehci;

	/*
	 * hw default: 1K periodic list heads, one per frame.
	 * periodic_size can shrink by USBCMD update if hcc_params allows.
	 */
	ehci->periodic_size = DEFAULT_I_TDPS;
	if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
		return retval;

	/* controllers may cache some of the periodic schedule ... */
	hcc_params = readl(&ehci->caps->hcc_params);
	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
		ehci->i_thresh = 8;
	else					// N microframes cached
		ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);

	ehci->reclaim = NULL;
	ehci->reclaim_ready = 0;
	ehci->next_uframe = -1;

	/*
	 * dedicate a qh for the async ring head, since we couldn't unlink
	 * a 'real' qh without stopping the async schedule [4.8]. use it
	 * as the 'reclamation list head' too.
	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
	 * from automatically advancing to the next td after short reads.
	 */
	ehci->async->qh_next.qh = NULL;
	ehci->async->hw_next = QH_NEXT(ehci->async->qh_dma);
	ehci->async->hw_info1 = cpu_to_le32(QH_HEAD);
	ehci->async->hw_token = cpu_to_le32(QTD_STS_HALT);
	ehci->async->hw_qtd_next = EHCI_LIST_END;
	ehci->async->qh_state = QH_STATE_LINKED;
	ehci->async->hw_alt_next = QTD_NEXT(ehci->async->dummy->qtd_dma);

	/* clear interrupt enables, set irq latency */
	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
		log2_irq_thresh = 0;
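	/* the Interrupt Threshold Control field lives in USBCMD bits 23:16,
	 * counted in microframes
	 */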
	temp = 1 << (16 + log2_irq_thresh);
	if (HCC_CANPARK(hcc_params)) {
		/* HW default park == 3, on hardware that supports it (like
		 * NVidia and ALI silicon), maximizes throughput on the async
		 * schedule by avoiding QH fetches between transfers.
		 *
		 * With fast usb storage devices and NForce2, "park" seems to
		 * make problems: throughput reduction (!), data errors...
		 */
		if (park) {
			park = min(park, (unsigned) 3);
			temp |= CMD_PARK;
			temp |= park << 8;
		}
		ehci_dbg(ehci, "park %d\n", park);
	}
	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
		/* periodic schedule size can be smaller than default */
		temp &= ~(3 << 2);
		temp |= (EHCI_TUNE_FLS << 2);
		switch (EHCI_TUNE_FLS) {
		case 0: ehci->periodic_size = 1024; break;
		case 1: ehci->periodic_size = 512; break;
		case 2: ehci->periodic_size = 256; break;
		default: BUG();
		}
	}
	ehci->command = temp;

	return 0;
}

/* start HC running; it's halted, ehci_init() has been run (once) */
static int ehci_run (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	int			retval;
	u32			temp;
	u32			hcc_params;

	/* EHCI spec section 4.1 */
	if ((retval = ehci_reset(ehci)) != 0) {
		ehci_mem_cleanup(ehci);
		return retval;
	}
	writel(ehci->periodic_dma, &ehci->regs->frame_list);
	writel((u32)ehci->async->qh_dma, &ehci->regs->async_next);

	/*
	 * hcc_params controls whether ehci->regs->segment must (!!!)
	 * be used; it constrains QH/ITD/SITD and QTD locations.
	 * pci_pool consistent memory always uses segment zero.
	 * streaming mappings for I/O buffers, like pci_map_single(),
	 * can return segments above 4GB, if the device allows.
	 *
	 * NOTE: the dma mask is visible through dma_supported(), so
	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
	 * Scsi_Host.highmem_io, and so forth. It's readonly to all
	 * host side drivers though.
	 */
	hcc_params = readl(&ehci->caps->hcc_params);
	if (HCC_64BIT_ADDR(hcc_params)) {
		writel(0, &ehci->regs->segment);
#if 0
		// this is deeply broken on almost all architectures
		if (!dma_set_mask(hcd->self.controller, DMA_64BIT_MASK))
			ehci_info(ehci, "enabled 64bit DMA\n");
#endif
	}


	// Philips, Intel, and maybe others need CMD_RUN before the
	// root hub will detect new devices (why?); NEC doesn't
	ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
	ehci->command |= CMD_RUN;
	writel (ehci->command, &ehci->regs->command);
	dbg_cmd (ehci, "init", ehci->command);

	/*
	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
	 * are explicitly handed to companion controller(s), so no TT is
	 * involved with the root hub. (Except where one is integrated,
	 * and there's no companion controller unless maybe for USB OTG.)
	 */
	hcd->state = HC_STATE_RUNNING;
	writel (FLAG_CF, &ehci->regs->configured_flag);
	readl (&ehci->regs->command);	/* unblock posted writes */

	temp = HC_VERSION(readl (&ehci->caps->hc_capbase));
	ehci_info (ehci,
		"USB %x.%x started, EHCI %x.%02x, driver %s\n",
		((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
		temp >> 8, temp & 0xff, DRIVER_VERSION);

	writel (INTR_MASK, &ehci->regs->intr_enable); /* Turn On Interrupts */

	/* GRR this is run-once init(), being done every time the HC starts.
	 * So long as they're part of class devices, we can't do it in init()
	 * since the class device isn't created that early.
	 */
	create_debug_files(ehci);

	return 0;
}

/*-------------------------------------------------------------------------*/

static irqreturn_t ehci_irq (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	u32			status;
	int			bh;

	spin_lock (&ehci->lock);

	status = readl (&ehci->regs->status);

	/* e.g. cardbus physical eject */
	if (status == ~(u32) 0) {
		ehci_dbg (ehci, "device removed\n");
		goto dead;
	}

	status &= INTR_MASK;
	if (!status) {			/* irq sharing? */
		spin_unlock(&ehci->lock);
		return IRQ_NONE;
	}

	/* clear (just) interrupts */
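	/* (status bits in USBSTS are write-one-to-clear) */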
	writel (status, &ehci->regs->status);
	readl (&ehci->regs->command);	/* unblock posted write */
	bh = 0;

#ifdef EHCI_VERBOSE_DEBUG
	/* unrequested/ignored: Frame List Rollover */
	dbg_status (ehci, "irq", status);
#endif

	/* INT, ERR, and IAA interrupt rates can be throttled */

	/* normal [4.15.1.2] or error [4.15.1.1] completion */
	if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
		if (likely ((status & STS_ERR) == 0))
			COUNT (ehci->stats.normal);
		else
			COUNT (ehci->stats.error);
		bh = 1;
	}

	/* complete the unlinking of some qh [4.15.2.3] */
	if (status & STS_IAA) {
		COUNT (ehci->stats.reclaim);
		ehci->reclaim_ready = 1;
		bh = 1;
	}

	/* remote wakeup [4.3.1] */
	if (status & STS_PCD) {
		unsigned	i = HCS_N_PORTS (ehci->hcs_params);

		/* resume root hub? */
		status = readl (&ehci->regs->command);
		if (!(status & CMD_RUN))
			writel (status | CMD_RUN, &ehci->regs->command);

		while (i--) {
			int pstatus = readl (&ehci->regs->port_status [i]);

			if (pstatus & PORT_OWNER)
				continue;
			if (!(pstatus & PORT_RESUME)
					|| ehci->reset_done [i] != 0)
				continue;

			/* start 20 msec resume signaling from this port,
			 * and make khubd collect PORT_STAT_C_SUSPEND to
			 * stop that signaling.
			 */
			ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
			usb_hcd_resume_root_hub(hcd);
		}
	}

	/* PCI errors [4.15.2.4] */
	if (unlikely ((status & STS_FATAL) != 0)) {
		/* bogus "fatal" IRQs appear on some chips... why? */
		status = readl (&ehci->regs->status);
		dbg_cmd (ehci, "fatal", readl (&ehci->regs->command));
		dbg_status (ehci, "fatal", status);
		if (status & STS_HALT) {
			ehci_err (ehci, "fatal error\n");
dead:
			ehci_reset (ehci);
			writel (0, &ehci->regs->configured_flag);
			/* generic layer kills/unlinks all urbs, then
			 * uses ehci_stop to clean up the rest
			 */
			bh = 1;
		}
	}

	if (bh)
		ehci_work (ehci);
	spin_unlock (&ehci->lock);
	return IRQ_HANDLED;
}

/*-------------------------------------------------------------------------*/

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 *
 * urb + dev is in hcd.self.controller.urb_list
 * we're queueing TDs onto software and hardware lists
 *
 * hcd-specific init for hcpriv hasn't been done yet
 *
 * NOTE: control, bulk, and interrupt share the same code to append TDs
 * to a (possibly active) QH, and the same QH scanning code.
 */
static int ehci_urb_enqueue (
	struct usb_hcd	*hcd,
	struct usb_host_endpoint *ep,
	struct urb	*urb,
	gfp_t		mem_flags
) {
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	struct list_head	qtd_list;

	INIT_LIST_HEAD (&qtd_list);

	switch (usb_pipetype (urb->pipe)) {
	// case PIPE_CONTROL:
	// case PIPE_BULK:
	default:
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return submit_async (ehci, ep, urb, &qtd_list, mem_flags);

	case PIPE_INTERRUPT:
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return intr_submit (ehci, ep, urb, &qtd_list, mem_flags);

	case PIPE_ISOCHRONOUS:
		if (urb->dev->speed == USB_SPEED_HIGH)
			return itd_submit (ehci, urb, mem_flags);
		else
			return sitd_submit (ehci, urb, mem_flags);
	}
}

static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* if we need to use IAA and it's busy, defer */
	if (qh->qh_state == QH_STATE_LINKED
			&& ehci->reclaim
			&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) {
		struct ehci_qh		*last;

		for (last = ehci->reclaim;
				last->reclaim;
				last = last->reclaim)
			continue;
		qh->qh_state = QH_STATE_UNLINK_WAIT;
		last->reclaim = qh;

	/* bypass IAA if the hc can't care */
	} else if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state) && ehci->reclaim)
		end_unlink_async (ehci);

	/* something else might have unlinked the qh by now */
	if (qh->qh_state == QH_STATE_LINKED)
		start_unlink_async (ehci, qh);
}

/* remove from hardware lists
 * completions normally happen asynchronously
 */

static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	struct ehci_qh		*qh;
	unsigned long		flags;

	spin_lock_irqsave (&ehci->lock, flags);
	switch (usb_pipetype (urb->pipe)) {
	// case PIPE_CONTROL:
	// case PIPE_BULK:
	default:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		unlink_async (ehci, qh);
		break;

	case PIPE_INTERRUPT:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
			intr_deschedule (ehci, qh);
			/* FALL THROUGH */
		case QH_STATE_IDLE:
			qh_completions (ehci, qh);
			break;
		default:
			ehci_dbg (ehci, "bogus qh %p state %d\n",
					qh, qh->qh_state);
			goto done;
		}

		/* reschedule QH iff another request is queued */
		if (!list_empty (&qh->qtd_list)
				&& HC_IS_RUNNING (hcd->state)) {
			int status;

			status = qh_schedule (ehci, qh);
			spin_unlock_irqrestore (&ehci->lock, flags);

			if (status != 0) {
				// shouldn't happen often, but ...
				// FIXME kill those tds' urbs
				err ("can't reschedule qh %p, err %d",
					qh, status);
			}
			return status;
		}
		break;

	case PIPE_ISOCHRONOUS:
		// itd or sitd ...

		// wait till next completion, do it then.
		// completion irqs can wait up to 1024 msec,
		break;
	}
done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

// bulk qh holds the data toggle

static void
ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	unsigned long		flags;
	struct ehci_qh		*qh, *tmp;

	/* ASSERT: any requests/urbs are being unlinked */
	/* ASSERT: nobody can be submitting urbs for this any more */

rescan:
	spin_lock_irqsave (&ehci->lock, flags);
	qh = ep->hcpriv;
	if (!qh)
		goto done;

	/* endpoints can be iso streams. for now, we don't
	 * accelerate iso completions ... so spin a while.
	 */
	if (qh->hw_info1 == 0) {
		ehci_vdbg (ehci, "iso delay\n");
		goto idle_timeout;
	}

	if (!HC_IS_RUNNING (hcd->state))
		qh->qh_state = QH_STATE_IDLE;
	switch (qh->qh_state) {
	case QH_STATE_LINKED:
		for (tmp = ehci->async->qh_next.qh;
				tmp && tmp != qh;
				tmp = tmp->qh_next.qh)
			continue;
		/* periodic qh self-unlinks on empty */
		if (!tmp)
			goto nogood;
		unlink_async (ehci, qh);
		/* FALL THROUGH */
	case QH_STATE_UNLINK:		/* wait for hw to finish? */
idle_timeout:
		spin_unlock_irqrestore (&ehci->lock, flags);
		schedule_timeout_uninterruptible(1);
		goto rescan;
	case QH_STATE_IDLE:		/* fully unlinked */
		if (list_empty (&qh->qtd_list)) {
			qh_put (qh);
			break;
		}
		/* else FALL THROUGH */
	default:
nogood:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job. just leak this memory.
		 */
		ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
			qh, ep->desc.bEndpointAddress, qh->qh_state,
			list_empty (&qh->qtd_list) ? "" : "(has tds)");
		break;
	}
	ep->hcpriv = NULL;
done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	return;
}

static int ehci_get_frame (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
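	/* FRINDEX counts microframes; shifting right by 3 yields the frame
	 * number (8 microframes per frame)
	 */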
	return (readl (&ehci->regs->frame_index) >> 3) % ehci->periodic_size;
}

/*-------------------------------------------------------------------------*/

#define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC

MODULE_DESCRIPTION (DRIVER_INFO);
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_LICENSE ("GPL");

#ifdef CONFIG_PCI
#include "ehci-pci.c"
#define	PCI_DRIVER		ehci_pci_driver
#endif

#ifdef CONFIG_MPC834x
#include "ehci-fsl.c"
#define	PLATFORM_DRIVER		ehci_fsl_driver
#endif

#ifdef CONFIG_SOC_AU1200
#include "ehci-au1xxx.c"
#define	PLATFORM_DRIVER		ehci_hcd_au1xxx_driver
#endif

#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER)
#error "missing bus glue for ehci-hcd"
#endif

static int __init ehci_hcd_init(void)
{
	int retval = 0;

	pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
		 hcd_name,
		 sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
		 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));

#ifdef PLATFORM_DRIVER
	retval = platform_driver_register(&PLATFORM_DRIVER);
	if (retval < 0)
		return retval;
#endif

#ifdef PCI_DRIVER
	retval = pci_register_driver(&PCI_DRIVER);
	if (retval < 0) {
#ifdef PLATFORM_DRIVER
		platform_driver_unregister(&PLATFORM_DRIVER);
#endif
	}
#endif

	return retval;
}
module_init(ehci_hcd_init);

static void __exit ehci_hcd_cleanup(void)
{
#ifdef PLATFORM_DRIVER
	platform_driver_unregister(&PLATFORM_DRIVER);
#endif
#ifdef PCI_DRIVER
	pci_unregister_driver(&PCI_DRIVER);
#endif
}
module_exit(ehci_hcd_cleanup);