/*
 * Copyright (c) 2000-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>

#ifdef CONFIG_USB_DEBUG
#define DEBUG
#else
#undef DEBUG
#endif

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/usb.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>

#include "../core/hcd.h"

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>


/*-------------------------------------------------------------------------*/

/*
 * EHCI hc_driver implementation ... experimental, incomplete.
 * Based on the final 1.0 register interface specification.
 *
 * USB 2.0 shows up in upcoming www.pcmcia.org technology.
 * First was PCMCIA, like ISA; then CardBus, which is PCI.
 * Next comes "CardBay", using USB 2.0 signals.
 *
 * Contains additional contributions by Brad Hards, Rory Bolt, and others.
 * Special thanks to Intel and VIA for providing host controllers to
 * test this driver on, and Cypress (including In-System Design) for
 * providing early devices for those host controllers to talk to!
 *
 * HISTORY:
 *
 * 2004-05-10 Root hub and PCI suspend/resume support; remote wakeup. (db)
 * 2004-02-24 Replace pci_* with generic dma_* API calls (dsaxena@plexity.net)
 * 2003-12-29 Rewritten high speed iso transfer support (by Michal Sojka,
 *      <sojkam@centrum.cz>, updates by DB).
 *
 * 2002-11-29 Correct handling for hw async_next register.
 * 2002-08-06 Handling for bulk and interrupt transfers is mostly shared;
 *      only scheduling is different, no arbitrary limitations.
 * 2002-07-25 Sanity check PCI reads, mostly for better cardbus support,
 *      clean up HC run state handshaking.
 * 2002-05-24 Preliminary FS/LS interrupts, using scheduling shortcuts
 * 2002-05-11 Clear TT errors for FS/LS ctrl/bulk.  Fill in some other
 *      missing pieces: enabling 64bit dma, handoff from BIOS/SMM.
 * 2002-05-07 Some error path cleanups to report better errors; wmb();
 *      use non-CVS version id; better iso bandwidth claim.
 * 2002-04-19 Control/bulk/interrupt submit no longer uses giveback() on
 *      errors in submit path.  Bugfixes to interrupt scheduling/processing.
 * 2002-03-05 Initial high-speed ISO support; reduce ITD memory; shift
 *      more checking to generic hcd framework (db).  Make it work with
 *      Philips EHCI; reduce PCI traffic; shorten IRQ path (Rory Bolt).
 * 2002-01-14 Minor cleanup; version synch.
 * 2002-01-08 Fix roothub handoff of FS/LS to companion controllers.
 * 2002-01-04 Control/Bulk queuing behaves.
 *
 * 2001-12-12 Initial patch version for Linux 2.5.1 kernel.
 * 2001-June  Works with usb-storage and NEC EHCI on 2.4
 */

#define DRIVER_VERSION "10 Dec 2004"
#define DRIVER_AUTHOR "David Brownell"
#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"

static const char hcd_name [] = "ehci_hcd";


#undef EHCI_VERBOSE_DEBUG
#undef EHCI_URB_TRACE

#ifdef DEBUG
#define EHCI_STATS
#endif

/* magic numbers that can affect system performance */
#define EHCI_TUNE_CERR          3       /* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS         4       /* nak throttle; see 4.9 */
#define EHCI_TUNE_RL_TT         0
#define EHCI_TUNE_MULT_HS       1       /* 1-3 transactions/uframe; 4.10.3 */
#define EHCI_TUNE_MULT_TT       1
#define EHCI_TUNE_FLS           2       /* (small) 256 frame schedule */

#define EHCI_IAA_JIFFIES        (HZ/100)        /* arbitrary; ~10 msec */
#define EHCI_IO_JIFFIES         (HZ/10)         /* io watchdog > irq_thresh */
#define EHCI_ASYNC_JIFFIES      (HZ/20)         /* async idle timeout */
#define EHCI_SHRINK_JIFFIES     (HZ/200)        /* async qh unlink delay */

/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh = 0;         // 0 to 6
module_param (log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");

/* initial park setting: slower than hw default */
static unsigned park = 0;
module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");

#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)

/*-------------------------------------------------------------------------*/

#include "ehci.h"
#include "ehci-dbg.c"

/*-------------------------------------------------------------------------*/

/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
 * before driver shutdown.  But it also seems to be caused by bugs in cardbus
 * bridge shutdown: shutting down the bridge before the devices using it.
 */
static int handshake (void __iomem *ptr, u32 mask, u32 done, int usec)
{
        u32     result;

        do {
                result = readl (ptr);
                if (result == ~(u32)0)          /* card removed */
                        return -ENODEV;
                result &= mask;
                if (result == done)
                        return 0;
                udelay (1);
                usec--;
        } while (usec > 0);
        return -ETIMEDOUT;
}

/* force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt (struct ehci_hcd *ehci)
{
        u32     temp = readl (&ehci->regs->status);

        /* disable any irqs left enabled by previous code */
        writel (0, &ehci->regs->intr_enable);

        if ((temp & STS_HALT) != 0)
                return 0;

        temp = readl (&ehci->regs->command);
        temp &= ~CMD_RUN;
        writel (temp, &ehci->regs->command);
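        /* the spec requires the controller to halt within 16 microframes
         * of CMD_RUN being cleared, hence the 16 * 125 usec budget below
         */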
        return handshake (&ehci->regs->status, STS_HALT, STS_HALT, 16 * 125);
}

/* put TDI/ARC silicon into EHCI mode */
static void tdi_reset (struct ehci_hcd *ehci)
{
        u32 __iomem     *reg_ptr;
        u32             tmp;

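        /* presumably the controller-mode register of these TDI/ARC-derived
         * cores, 0x68 past the operational registers; setting its low two
         * bits to 3 selects host-controller mode
         */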
        reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + 0x68);
        tmp = readl (reg_ptr);
        tmp |= 0x3;
        writel (tmp, reg_ptr);
}

/* reset a non-running (STS_HALT == 1) controller */
static int ehci_reset (struct ehci_hcd *ehci)
{
        int     retval;
        u32     command = readl (&ehci->regs->command);

        command |= CMD_RESET;
        dbg_cmd (ehci, "reset", command);
        writel (command, &ehci->regs->command);
        ehci_to_hcd(ehci)->state = HC_STATE_HALT;
        ehci->next_statechange = jiffies;
        retval = handshake (&ehci->regs->command, CMD_RESET, 0, 250 * 1000);

        if (retval)
                return retval;

        if (ehci_is_TDI(ehci))
                tdi_reset (ehci);

        return retval;
}

/* idle the controller (from running) */
static void ehci_quiesce (struct ehci_hcd *ehci)
{
        u32     temp;

#ifdef DEBUG
        if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
                BUG ();
#endif

        /* wait for any schedule enables/disables to take effect */
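        /* CMD_ASE/CMD_PSE sit ten bits below STS_ASS/STS_PSS, so shifting
         * the command register left by 10 lines the schedule enable bits
         * up with the corresponding schedule status bits
         */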
        temp = readl (&ehci->regs->command) << 10;
        temp &= STS_ASS | STS_PSS;
        if (handshake (&ehci->regs->status, STS_ASS | STS_PSS,
                        temp, 16 * 125) != 0) {
                ehci_to_hcd(ehci)->state = HC_STATE_HALT;
                return;
        }

        /* then disable anything that's still active */
        temp = readl (&ehci->regs->command);
        temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
        writel (temp, &ehci->regs->command);

        /* hardware can take 16 microframes to turn off ... */
        if (handshake (&ehci->regs->status, STS_ASS | STS_PSS,
                        0, 16 * 125) != 0) {
                ehci_to_hcd(ehci)->state = HC_STATE_HALT;
                return;
        }
}

/*-------------------------------------------------------------------------*/

static void ehci_work(struct ehci_hcd *ehci, struct pt_regs *regs);

#include "ehci-hub.c"
#include "ehci-mem.c"
#include "ehci-q.c"
#include "ehci-sched.c"

/*-------------------------------------------------------------------------*/

static void ehci_watchdog (unsigned long param)
{
        struct ehci_hcd         *ehci = (struct ehci_hcd *) param;
        unsigned long           flags;

        spin_lock_irqsave (&ehci->lock, flags);

        /* lost IAA irqs wedge things badly; seen with a vt8235 */
        if (ehci->reclaim) {
                u32             status = readl (&ehci->regs->status);

                if (status & STS_IAA) {
                        ehci_vdbg (ehci, "lost IAA\n");
                        COUNT (ehci->stats.lost_iaa);
                        writel (STS_IAA, &ehci->regs->status);
                        ehci->reclaim_ready = 1;
                }
        }

        /* stop async processing after it's idled a bit */
        if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
                start_unlink_async (ehci, ehci->async);

        /* ehci could run by timer, without IRQs ... */
        ehci_work (ehci, NULL);

        spin_unlock_irqrestore (&ehci->lock, flags);
}

/* Reboot notifiers kick in for silicon on any bus (not just pci, etc).
 * This forcibly disables dma and IRQs, helping kexec and other cases
 * where the next system software may expect clean state.
 */
static int
ehci_reboot (struct notifier_block *self, unsigned long code, void *null)
{
        struct ehci_hcd         *ehci;

        ehci = container_of (self, struct ehci_hcd, reboot_notifier);
        (void) ehci_halt (ehci);

        /* make BIOS/etc use companion controller during reboot */
        writel (0, &ehci->regs->configured_flag);
        return 0;
}

static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
{
        unsigned port;

        if (!HCS_PPC (ehci->hcs_params))
                return;

        ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down");
        for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
                (void) ehci_hub_control(ehci_to_hcd(ehci),
                                is_on ? SetPortFeature : ClearPortFeature,
                                USB_PORT_FEAT_POWER,
                                port--, NULL, 0);
        msleep(20);
}

/*-------------------------------------------------------------------------*/

/*
 * ehci_work is called from some interrupts, timers, and so on.
 * it calls driver completion functions, after dropping ehci->lock.
 */
static void ehci_work (struct ehci_hcd *ehci, struct pt_regs *regs)
{
        timer_action_done (ehci, TIMER_IO_WATCHDOG);
        if (ehci->reclaim_ready)
                end_unlink_async (ehci, regs);

        /* another CPU may drop ehci->lock during a schedule scan while
         * it reports urb completions.  this flag guards against bogus
         * attempts at re-entrant schedule scanning.
         */
        if (ehci->scanning)
                return;
        ehci->scanning = 1;
        scan_async (ehci, regs);
        if (ehci->next_uframe != -1)
                scan_periodic (ehci, regs);
        ehci->scanning = 0;

        /* the IO watchdog guards against hardware or driver bugs that
         * misplace IRQs, and should let us run completely without IRQs.
         * such lossage has been observed on both VT6202 and VT8235.
         */
        if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
                        (ehci->async->qh_next.ptr != NULL ||
                         ehci->periodic_sched != 0))
                timer_action (ehci, TIMER_IO_WATCHDOG);
}

static void ehci_stop (struct usb_hcd *hcd)
{
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);

        ehci_dbg (ehci, "stop\n");

        /* Turn off port power on all root hub ports. */
        ehci_port_power (ehci, 0);

        /* no more interrupts ... */
        del_timer_sync (&ehci->watchdog);

        spin_lock_irq(&ehci->lock);
        if (HC_IS_RUNNING (hcd->state))
                ehci_quiesce (ehci);

        ehci_reset (ehci);
        writel (0, &ehci->regs->intr_enable);
        spin_unlock_irq(&ehci->lock);

        /* let companion controllers work when we aren't */
        writel (0, &ehci->regs->configured_flag);
        unregister_reboot_notifier (&ehci->reboot_notifier);

        remove_debug_files (ehci);

        /* root hub is shut down separately (first, when possible) */
        spin_lock_irq (&ehci->lock);
        if (ehci->async)
                ehci_work (ehci, NULL);
        spin_unlock_irq (&ehci->lock);
        ehci_mem_cleanup (ehci);

#ifdef EHCI_STATS
        ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
                ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
                ehci->stats.lost_iaa);
        ehci_dbg (ehci, "complete %ld unlink %ld\n",
                ehci->stats.complete, ehci->stats.unlink);
#endif

        dbg_status (ehci, "ehci_stop completed", readl (&ehci->regs->status));
}

static int ehci_run (struct usb_hcd *hcd)
{
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
        u32                     temp;
        int                     retval;
        u32                     hcc_params;
        int                     first;

        /* skip some things on restart paths */
        first = (ehci->watchdog.data == 0);
        if (first) {
                init_timer (&ehci->watchdog);
                ehci->watchdog.function = ehci_watchdog;
                ehci->watchdog.data = (unsigned long) ehci;
        }

        /*
         * hw default: 1K periodic list heads, one per frame.
         * periodic_size can shrink by USBCMD update if hcc_params allows.
         */
        ehci->periodic_size = DEFAULT_I_TDPS;
        if (first && (retval = ehci_mem_init (ehci, GFP_KERNEL)) < 0)
                return retval;

        /* controllers may cache some of the periodic schedule ... */
        hcc_params = readl (&ehci->caps->hcc_params);
        if (HCC_ISOC_CACHE (hcc_params))        // full frame cache
                ehci->i_thresh = 8;
        else                                    // N microframes cached
                ehci->i_thresh = 2 + HCC_ISOC_THRES (hcc_params);

        ehci->reclaim = NULL;
        ehci->reclaim_ready = 0;
        ehci->next_uframe = -1;

        /* controller state: unknown --> reset */

        /* EHCI spec section 4.1 */
        if ((retval = ehci_reset (ehci)) != 0) {
                ehci_mem_cleanup (ehci);
                return retval;
        }
        writel (ehci->periodic_dma, &ehci->regs->frame_list);

        /*
         * dedicate a qh for the async ring head, since we couldn't unlink
         * a 'real' qh without stopping the async schedule [4.8].  use it
         * as the 'reclamation list head' too.
         * its dummy is used in hw_alt_next of many tds, to prevent the qh
         * from automatically advancing to the next td after short reads.
         */
        if (first) {
                ehci->async->qh_next.qh = NULL;
                ehci->async->hw_next = QH_NEXT (ehci->async->qh_dma);
                ehci->async->hw_info1 = cpu_to_le32 (QH_HEAD);
                ehci->async->hw_token = cpu_to_le32 (QTD_STS_HALT);
                ehci->async->hw_qtd_next = EHCI_LIST_END;
                ehci->async->qh_state = QH_STATE_LINKED;
                ehci->async->hw_alt_next = QTD_NEXT (ehci->async->dummy->qtd_dma);
        }
        writel ((u32)ehci->async->qh_dma, &ehci->regs->async_next);

        /*
         * hcc_params controls whether ehci->regs->segment must (!!!)
         * be used; it constrains QH/ITD/SITD and QTD locations.
         * pci_pool consistent memory always uses segment zero.
         * streaming mappings for I/O buffers, like pci_map_single(),
         * can return segments above 4GB, if the device allows.
         *
         * NOTE: the dma mask is visible through dma_supported(), so
         * drivers can pass this info along ... like NETIF_F_HIGHDMA,
         * Scsi_Host.highmem_io, and so forth.  It's readonly to all
         * host side drivers though.
         */
        if (HCC_64BIT_ADDR (hcc_params)) {
                writel (0, &ehci->regs->segment);
#if 0
                // this is deeply broken on almost all architectures
                if (!dma_set_mask (hcd->self.controller, DMA_64BIT_MASK))
                        ehci_info (ehci, "enabled 64bit DMA\n");
#endif
        }

        /* clear interrupt enables, set irq latency */
        if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
                log2_irq_thresh = 0;
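        /* the Interrupt Threshold Control field occupies USBCMD bits 23:16;
         * programming 2^log2_irq_thresh there lets the controller interrupt
         * at most once per that many (1-64) microframes
         */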
        temp = 1 << (16 + log2_irq_thresh);
        if (HCC_CANPARK(hcc_params)) {
                /* HW default park == 3, on hardware that supports it (like
                 * NVidia and ALI silicon), maximizes throughput on the async
                 * schedule by avoiding QH fetches between transfers.
                 *
                 * With fast usb storage devices and NForce2, "park" seems to
                 * make problems: throughput reduction (!), data errors...
                 */
                if (park) {
                        park = min (park, (unsigned) 3);
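                        /* the park count field occupies USBCMD bits 9:8;
                         * CMD_PARK is the separate enable bit
                         */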
                        temp |= CMD_PARK;
                        temp |= park << 8;
                }
                ehci_info (ehci, "park %d\n", park);
        }
        if (HCC_PGM_FRAMELISTLEN (hcc_params)) {
                /* periodic schedule size can be smaller than default */
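                /* frame list size is selected by USBCMD bits 3:2:
                 * 0 = 1024 elements, 1 = 512, 2 = 256
                 */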
                temp &= ~(3 << 2);
                temp |= (EHCI_TUNE_FLS << 2);
                switch (EHCI_TUNE_FLS) {
                case 0: ehci->periodic_size = 1024; break;
                case 1: ehci->periodic_size = 512; break;
                case 2: ehci->periodic_size = 256; break;
                default: BUG ();
                }
        }
        // Philips, Intel, and maybe others need CMD_RUN before the
        // root hub will detect new devices (why?); NEC doesn't
        temp |= CMD_RUN;
        writel (temp, &ehci->regs->command);
        dbg_cmd (ehci, "init", temp);

        /* set async sleep time = 10 us ... ? */

        /*
         * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
         * are explicitly handed to companion controller(s), so no TT is
         * involved with the root hub.  (Except where one is integrated,
         * and there's no companion controller unless maybe for USB OTG.)
         */
        if (first) {
                ehci->reboot_notifier.notifier_call = ehci_reboot;
                register_reboot_notifier (&ehci->reboot_notifier);
        }

        hcd->state = HC_STATE_RUNNING;
        writel (FLAG_CF, &ehci->regs->configured_flag);
        readl (&ehci->regs->command);   /* unblock posted write */

        temp = HC_VERSION(readl (&ehci->caps->hc_capbase));
        ehci_info (ehci,
                "USB %x.%x %s, EHCI %x.%02x, driver %s\n",
                ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
                first ? "initialized" : "restarted",
                temp >> 8, temp & 0xff, DRIVER_VERSION);

        writel (INTR_MASK, &ehci->regs->intr_enable); /* Turn On Interrupts */

        if (first)
                create_debug_files (ehci);

        return 0;
}

/*-------------------------------------------------------------------------*/

static irqreturn_t ehci_irq (struct usb_hcd *hcd, struct pt_regs *regs)
{
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
        u32                     status;
        int                     bh;

        spin_lock (&ehci->lock);

        status = readl (&ehci->regs->status);

        /* e.g. cardbus physical eject */
        if (status == ~(u32) 0) {
                ehci_dbg (ehci, "device removed\n");
                goto dead;
        }

        status &= INTR_MASK;
        if (!status) {                  /* irq sharing? */
                spin_unlock(&ehci->lock);
                return IRQ_NONE;
        }

        /* clear (just) interrupts */
        writel (status, &ehci->regs->status);
        readl (&ehci->regs->command);   /* unblock posted write */
        bh = 0;

#ifdef EHCI_VERBOSE_DEBUG
        /* unrequested/ignored: Frame List Rollover */
        dbg_status (ehci, "irq", status);
#endif

        /* INT, ERR, and IAA interrupt rates can be throttled */

        /* normal [4.15.1.2] or error [4.15.1.1] completion */
        if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
                if (likely ((status & STS_ERR) == 0))
                        COUNT (ehci->stats.normal);
                else
                        COUNT (ehci->stats.error);
                bh = 1;
        }

        /* complete the unlinking of some qh [4.15.2.3] */
        if (status & STS_IAA) {
                COUNT (ehci->stats.reclaim);
                ehci->reclaim_ready = 1;
                bh = 1;
        }

        /* remote wakeup [4.3.1] */
        if ((status & STS_PCD) && hcd->remote_wakeup) {
                unsigned        i = HCS_N_PORTS (ehci->hcs_params);

                /* resume root hub? */
                status = readl (&ehci->regs->command);
                if (!(status & CMD_RUN))
                        writel (status | CMD_RUN, &ehci->regs->command);

                while (i--) {
                        status = readl (&ehci->regs->port_status [i]);
                        if (status & PORT_OWNER)
                                continue;
                        if (!(status & PORT_RESUME)
                                        || ehci->reset_done [i] != 0)
                                continue;

                        /* start 20 msec resume signaling from this port,
                         * and make khubd collect PORT_STAT_C_SUSPEND to
                         * stop that signaling.
                         */
                        ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
                        ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
                        usb_hcd_resume_root_hub(hcd);
                }
        }

        /* PCI errors [4.15.2.4] */
        if (unlikely ((status & STS_FATAL) != 0)) {
                /* bogus "fatal" IRQs appear on some chips... why? */
                status = readl (&ehci->regs->status);
                dbg_cmd (ehci, "fatal", readl (&ehci->regs->command));
                dbg_status (ehci, "fatal", status);
                if (status & STS_HALT) {
                        ehci_err (ehci, "fatal error\n");
dead:
                        ehci_reset (ehci);
                        writel (0, &ehci->regs->configured_flag);
                        /* generic layer kills/unlinks all urbs, then
                         * uses ehci_stop to clean up the rest
                         */
                        bh = 1;
                }
        }

        if (bh)
                ehci_work (ehci, regs);
        spin_unlock (&ehci->lock);
        return IRQ_HANDLED;
}

/*-------------------------------------------------------------------------*/

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 *
 * urb + dev is in hcd.self.controller.urb_list
 * we're queueing TDs onto software and hardware lists
 *
 * hcd-specific init for hcpriv hasn't been done yet
 *
 * NOTE: control, bulk, and interrupt share the same code to append TDs
 * to a (possibly active) QH, and the same QH scanning code.
 */
static int ehci_urb_enqueue (
        struct usb_hcd  *hcd,
        struct usb_host_endpoint *ep,
        struct urb      *urb,
        gfp_t           mem_flags
) {
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
        struct list_head        qtd_list;

        INIT_LIST_HEAD (&qtd_list);

        switch (usb_pipetype (urb->pipe)) {
        // case PIPE_CONTROL:
        // case PIPE_BULK:
        default:
                if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
                        return -ENOMEM;
                return submit_async (ehci, ep, urb, &qtd_list, mem_flags);

        case PIPE_INTERRUPT:
                if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
                        return -ENOMEM;
                return intr_submit (ehci, ep, urb, &qtd_list, mem_flags);

        case PIPE_ISOCHRONOUS:
                if (urb->dev->speed == USB_SPEED_HIGH)
                        return itd_submit (ehci, urb, mem_flags);
                else
                        return sitd_submit (ehci, urb, mem_flags);
        }
}

static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
        /* if we need to use IAA and it's busy, defer */
        if (qh->qh_state == QH_STATE_LINKED
                        && ehci->reclaim
                        && HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) {
                struct ehci_qh          *last;

                for (last = ehci->reclaim;
                                last->reclaim;
                                last = last->reclaim)
                        continue;
                qh->qh_state = QH_STATE_UNLINK_WAIT;
                last->reclaim = qh;

        /* bypass IAA if the hc can't care */
        } else if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state) && ehci->reclaim)
                end_unlink_async (ehci, NULL);

        /* something else might have unlinked the qh by now */
        if (qh->qh_state == QH_STATE_LINKED)
                start_unlink_async (ehci, qh);
}

/* remove from hardware lists
 * completions normally happen asynchronously
 */

static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
{
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
        struct ehci_qh          *qh;
        unsigned long           flags;

        spin_lock_irqsave (&ehci->lock, flags);
        switch (usb_pipetype (urb->pipe)) {
        // case PIPE_CONTROL:
        // case PIPE_BULK:
        default:
                qh = (struct ehci_qh *) urb->hcpriv;
                if (!qh)
                        break;
                unlink_async (ehci, qh);
                break;

        case PIPE_INTERRUPT:
                qh = (struct ehci_qh *) urb->hcpriv;
                if (!qh)
                        break;
                switch (qh->qh_state) {
                case QH_STATE_LINKED:
                        intr_deschedule (ehci, qh);
                        /* FALL THROUGH */
                case QH_STATE_IDLE:
                        qh_completions (ehci, qh, NULL);
                        break;
                default:
                        ehci_dbg (ehci, "bogus qh %p state %d\n",
                                        qh, qh->qh_state);
                        goto done;
                }

                /* reschedule QH iff another request is queued */
                if (!list_empty (&qh->qtd_list)
                                && HC_IS_RUNNING (hcd->state)) {
                        int status;

                        status = qh_schedule (ehci, qh);
                        spin_unlock_irqrestore (&ehci->lock, flags);

                        if (status != 0) {
                                // shouldn't happen often, but ...
                                // FIXME kill those tds' urbs
                                err ("can't reschedule qh %p, err %d",
                                        qh, status);
                        }
                        return status;
                }
                break;

        case PIPE_ISOCHRONOUS:
                // itd or sitd ...

                // wait till next completion, do it then.
                // completion irqs can wait up to 1024 msec,
                break;
        }
done:
        spin_unlock_irqrestore (&ehci->lock, flags);
        return 0;
}

/*-------------------------------------------------------------------------*/

// bulk qh holds the data toggle

static void
ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
        unsigned long           flags;
        struct ehci_qh          *qh, *tmp;

        /* ASSERT: any requests/urbs are being unlinked */
        /* ASSERT: nobody can be submitting urbs for this any more */

rescan:
        spin_lock_irqsave (&ehci->lock, flags);
        qh = ep->hcpriv;
        if (!qh)
                goto done;

        /* endpoints can be iso streams.  for now, we don't
         * accelerate iso completions ... so spin a while.
         */
        if (qh->hw_info1 == 0) {
                ehci_vdbg (ehci, "iso delay\n");
                goto idle_timeout;
        }

        if (!HC_IS_RUNNING (hcd->state))
                qh->qh_state = QH_STATE_IDLE;
        switch (qh->qh_state) {
        case QH_STATE_LINKED:
                for (tmp = ehci->async->qh_next.qh;
                                tmp && tmp != qh;
                                tmp = tmp->qh_next.qh)
                        continue;
                /* periodic qh self-unlinks on empty */
                if (!tmp)
                        goto nogood;
                unlink_async (ehci, qh);
                /* FALL THROUGH */
        case QH_STATE_UNLINK:           /* wait for hw to finish? */
idle_timeout:
                spin_unlock_irqrestore (&ehci->lock, flags);
                schedule_timeout_uninterruptible(1);
                goto rescan;
        case QH_STATE_IDLE:             /* fully unlinked */
                if (list_empty (&qh->qtd_list)) {
                        qh_put (qh);
                        break;
                }
                /* else FALL THROUGH */
        default:
nogood:
                /* caller was supposed to have unlinked any requests;
                 * that's not our job.  just leak this memory.
                 */
                ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
                        qh, ep->desc.bEndpointAddress, qh->qh_state,
                        list_empty (&qh->qtd_list) ? "" : "(has tds)");
                break;
        }
        ep->hcpriv = NULL;
done:
        spin_unlock_irqrestore (&ehci->lock, flags);
        return;
}

static int ehci_get_frame (struct usb_hcd *hcd)
{
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
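        /* frame_index counts microframes; the low three bits are the
         * microframe number, so shift right by 3 to get the frame
         */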
        return (readl (&ehci->regs->frame_index) >> 3) % ehci->periodic_size;
}

/*-------------------------------------------------------------------------*/

#define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC

MODULE_DESCRIPTION (DRIVER_INFO);
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_LICENSE ("GPL");

#ifdef CONFIG_PCI
#include "ehci-pci.c"
#endif

#if !defined(CONFIG_PCI)
#error "missing bus glue for ehci-hcd"
#endif