/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
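/*
 * Illustrative note (not part of the original source): link_quirk is a module
 * parameter, so it would typically be set at load time, e.g. something like
 * "modprobe xhci-hcd link_quirk=1" (the exact module name depends on the
 * kernel configuration), or toggled afterwards through
 * /sys/module/xhci_hcd/parameters/link_quirk since it is marked S_IWUSR.
 */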

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: the "usec" timeout passes
 * (major hardware flakeout), or the register reads as all-ones (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
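
/*
 * Illustrative example (not part of the original source): a typical caller
 * waiting for the controller to report the HCHalted status bit would do
 *
 *	ret = handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 *
 * which is the pattern xhci_halt() below uses.
 */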

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}

/*
 * Free IRQs
 * Free all requested IRQs.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			if (xhci->msix_entries[i].vector)
				free_irq(xhci->msix_entries[i].vector,
						xhci_to_hcd(xhci));
	} else if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_err(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the maximum number of interrupts the host can
	 *   handle, based on the number of interrupters in the xHCI HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one extra
	 *   vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_err(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*-------------------------------------------------------------------------*/


#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	u32 ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");
	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	/* Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to msi*/
		ret = xhci_setup_msi(xhci);

	if (ret) {
legacy_irq:
		/* fall back to legacy interrupt*/
		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
					hcd->irq_descr, hcd);
		if (ret) {
			xhci_err(xhci, "request interrupt %d failed\n",
					pdev->irq);
			return ret;
		}
		hcd->irq = pdev->irq;
	}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
		    xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
		    xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0, SEGMENT_SIZE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 *
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	int i;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}

	return rc;
}

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 *
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state*/
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10*100)) {
			xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (retval)
			goto failed_restart;

		xhci_dbg(xhci, "Start the secondary HCD\n");
		retval = xhci_run(secondary_hcd);
		if (!retval) {
			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			set_bit(HCD_FLAG_HW_ACCESSIBLE,
					&xhci->shared_hcd->flags);
		}
failed_restart:
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		return retval;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_unlock_irq(&xhci->lock);
	return 0;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
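
/*
 * Worked examples of the index formula above (illustrative, not part of the
 * original source):
 *	ep 1 OUT (0x01): (1 * 2) + 0 - 1 = 1
 *	ep 1 IN  (0x81): (1 * 2) + 1 - 1 = 2
 *	ep 0 (control):  (0 * 2)         = 0
 * xhci_get_endpoint_flag() below turns an index into bit (index + 1), so
 * ep 1 IN ends up as bit 3 (0x8) in the add/drop context flags.
 */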

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
					"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and "
					"virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				  size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
| 1143 | ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, |
| 1144 | slot_id, ep_index); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1145 | if (ret) |
| 1146 | goto free_priv; |
Andiry Xu | 787f4e5 | 2010-07-22 15:23:52 -0700 | [diff] [blame] | 1147 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1148 | } |
Sarah Sharp | d0e96f5 | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1149 | exit: |
Sarah Sharp | d0e96f5 | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1150 | return ret; |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1151 | dying: |
| 1152 | xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " |
| 1153 | "non-responsive xHCI host.\n", |
| 1154 | urb->ep->desc.bEndpointAddress, urb); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1155 | ret = -ESHUTDOWN; |
| 1156 | free_priv: |
| 1157 | xhci_urb_free_priv(xhci, urb_priv); |
| 1158 | urb->hcpriv = NULL; |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1159 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1160 | return ret; |
Sarah Sharp | d0e96f5 | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1161 | } |
| 1162 | |
Sarah Sharp | 021bff9 | 2010-07-29 22:12:20 -0700 | [diff] [blame] | 1163 | /* Get the right ring for the given URB. |
| 1164 | * If the endpoint supports streams, boundary check the URB's stream ID. |
| 1165 | * If the endpoint doesn't support streams, return the singular endpoint ring. |
| 1166 | */ |
| 1167 | static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, |
| 1168 | struct urb *urb) |
| 1169 | { |
| 1170 | unsigned int slot_id; |
| 1171 | unsigned int ep_index; |
| 1172 | unsigned int stream_id; |
| 1173 | struct xhci_virt_ep *ep; |
| 1174 | |
| 1175 | slot_id = urb->dev->slot_id; |
| 1176 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
| 1177 | stream_id = urb->stream_id; |
| 1178 | ep = &xhci->devs[slot_id]->eps[ep_index]; |
| 1179 | /* Common case: no streams */ |
| 1180 | if (!(ep->ep_state & EP_HAS_STREAMS)) |
| 1181 | return ep->ring; |
| 1182 | |
| 1183 | if (stream_id == 0) { |
| 1184 | xhci_warn(xhci, |
| 1185 | "WARN: Slot ID %u, ep index %u has streams, " |
| 1186 | "but URB has no stream ID.\n", |
| 1187 | slot_id, ep_index); |
| 1188 | return NULL; |
| 1189 | } |
| 1190 | |
| 1191 | if (stream_id < ep->stream_info->num_streams) |
| 1192 | return ep->stream_info->stream_rings[stream_id]; |
| 1193 | |
| 1194 | xhci_warn(xhci, |
| 1195 | "WARN: Slot ID %u, ep index %u has " |
| 1196 | "stream IDs 1 to %u allocated, " |
| 1197 | "but stream ID %u is requested.\n", |
| 1198 | slot_id, ep_index, |
| 1199 | ep->stream_info->num_streams - 1, |
| 1200 | stream_id); |
| 1201 | return NULL; |
| 1202 | } |
| 1203 | |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1204 | /* |
| 1205 | * Remove the URB's TD from the endpoint ring. This may cause the HC to stop |
| 1206 | * USB transfers, potentially stopping in the middle of a TRB buffer. The HC |
| 1207 | * should pick up where it left off in the TD, unless a Set Transfer Ring |
| 1208 | * Dequeue Pointer is issued. |
| 1209 | * |
| 1210 | * The TRBs that make up the buffers for the canceled URB will be "removed" from |
| 1211 | * the ring. Since the ring is a contiguous structure, they can't be physically |
| 1212 | * removed. Instead, there are three cases to handle: |
| 1213 | * |
| 1214 | * 1) If the HC is in the middle of processing the URB to be canceled, we |
| 1215 | * simply move the ring's dequeue pointer past those TRBs using the Set |
| 1216 | * Transfer Ring Dequeue Pointer command. This will be the common case, |
| 1217 | * when drivers timeout on the last submitted URB and attempt to cancel. |
| 1218 | * |
| 1219 | * 2) If the HC is in the middle of a different TD, we turn the TRBs into a |
| 1220 | * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The |
| 1221 | * HC will need to invalidate any TRBs it has cached after the stop |
| 1222 | * endpoint command, as noted in the xHCI 0.95 errata. |
| 1223 | * |
| 1224 | * 3) The TD may have completed by the time the Stop Endpoint Command |
| 1225 | * completes, so software needs to handle that case too. |
| 1226 | * |
| 1227 | * This function should protect against the TD enqueueing code ringing the |
| 1228 | * doorbell while this code is waiting for a Stop Endpoint command to complete. |
| 1229 | * It also needs to account for multiple cancellations happening at the same |
| 1230 | * time for the same endpoint. |
| 1231 | * |
| 1232 | * Note that this function can be called in any context, or so says |
| 1233 | * usb_hcd_unlink_urb() |
Sarah Sharp | d0e96f5 | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1234 | */ |
| 1235 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) |
| 1236 | { |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1237 | unsigned long flags; |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1238 | int ret, i; |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1239 | u32 temp; |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1240 | struct xhci_hcd *xhci; |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1241 | struct urb_priv *urb_priv; |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1242 | struct xhci_td *td; |
| 1243 | unsigned int ep_index; |
| 1244 | struct xhci_ring *ep_ring; |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 1245 | struct xhci_virt_ep *ep; |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1246 | |
| 1247 | xhci = hcd_to_xhci(hcd); |
| 1248 | spin_lock_irqsave(&xhci->lock, flags); |
| 1249 | /* Make sure the URB hasn't completed or been unlinked already */ |
| 1250 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); |
| 1251 | if (ret || !urb->hcpriv) |
| 1252 | goto done; |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1253 | temp = xhci_readl(xhci, &xhci->op_regs->status); |
Sarah Sharp | c6cc27c | 2011-03-11 10:20:58 -0800 | [diff] [blame] | 1254 | if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1255 | xhci_dbg(xhci, "HW died, freeing TD.\n"); |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1256 | urb_priv = urb->hcpriv; |
Sarah Sharp | 585df1d | 2011-08-02 15:43:40 -0700 | [diff] [blame] | 1257 | for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { |
| 1258 | td = urb_priv->td[i]; |
| 1259 | if (!list_empty(&td->td_list)) |
| 1260 | list_del_init(&td->td_list); |
| 1261 | if (!list_empty(&td->cancelled_td_list)) |
| 1262 | list_del_init(&td->cancelled_td_list); |
| 1263 | } |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1264 | |
| 1265 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
| 1266 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | 214f76f | 2010-10-26 11:22:02 -0700 | [diff] [blame] | 1267 | usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1268 | xhci_urb_free_priv(xhci, urb_priv); |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1269 | return ret; |
| 1270 | } |
Sarah Sharp | 7bd89b4 | 2011-07-01 13:35:40 -0700 | [diff] [blame] | 1271 | if ((xhci->xhc_state & XHCI_STATE_DYING) || |
| 1272 | (xhci->xhc_state & XHCI_STATE_HALTED)) { |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1273 | xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " |
| 1274 | "non-responsive xHCI host.\n", |
| 1275 | urb->ep->desc.bEndpointAddress, urb); |
| 1276 | /* Let the stop endpoint command watchdog timer (which set this |
| 1277 | * state) finish cleaning up the endpoint TD lists. We must |
| 1278 | * have caught it in the middle of dropping a lock and giving |
| 1279 | * back an URB. |
| 1280 | */ |
| 1281 | goto done; |
| 1282 | } |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1283 | |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 1284 | xhci_dbg(xhci, "Cancel URB %p\n", urb); |
Sarah Sharp | 66e49d8 | 2009-07-27 12:03:46 -0700 | [diff] [blame] | 1285 | xhci_dbg(xhci, "Event ring:\n"); |
| 1286 | xhci_debug_ring(xhci, xhci->event_ring); |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1287 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 1288 | ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; |
Sarah Sharp | e9df17e | 2010-04-02 15:34:43 -0700 | [diff] [blame] | 1289 | ep_ring = xhci_urb_to_transfer_ring(xhci, urb); |
| 1290 | if (!ep_ring) { |
| 1291 | ret = -EINVAL; |
| 1292 | goto done; |
| 1293 | } |
| 1294 | |
Sarah Sharp | 66e49d8 | 2009-07-27 12:03:46 -0700 | [diff] [blame] | 1295 | xhci_dbg(xhci, "Endpoint ring:\n"); |
| 1296 | xhci_debug_ring(xhci, ep_ring); |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1297 | |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1298 | urb_priv = urb->hcpriv; |
| 1299 | |
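| | /* Mark every TD of this URB that hasn't completed yet as cancelled; the |
| | * stop endpoint command completion handler will no-op those TRBs or move |
| | * the dequeue pointer past them, as described above. |
| | */ |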
| 1300 | for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { |
| 1301 | td = urb_priv->td[i]; |
| 1302 | list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); |
| 1303 | } |
| 1304 | |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1305 | /* Queue a stop endpoint command, but only if this is |
| 1306 | * the first cancellation to be handled. |
| 1307 | */ |
Sarah Sharp | 678539c | 2009-10-27 10:55:52 -0700 | [diff] [blame] | 1308 | if (!(ep->ep_state & EP_HALT_PENDING)) { |
| 1309 | ep->ep_state |= EP_HALT_PENDING; |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1310 | ep->stop_cmds_pending++; |
| 1311 | ep->stop_cmd_timer.expires = jiffies + |
| 1312 | XHCI_STOP_EP_CMD_TIMEOUT * HZ; |
| 1313 | add_timer(&ep->stop_cmd_timer); |
Andiry Xu | be88fe4 | 2010-10-14 07:22:57 -0700 | [diff] [blame] | 1314 | xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0); |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 1315 | xhci_ring_cmd_db(xhci); |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1316 | } |
| 1317 | done: |
| 1318 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 1319 | return ret; |
Sarah Sharp | d0e96f5 | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1320 | } |
| 1321 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1322 | /* Drop an endpoint from a new bandwidth configuration for this device. |
| 1323 | * Only one call to this function is allowed per endpoint before |
| 1324 | * check_bandwidth() or reset_bandwidth() must be called. |
| 1325 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will |
| 1326 | * add the endpoint to the schedule with possibly new parameters denoted by a |
| 1327 | * different endpoint descriptor in usb_host_endpoint. |
| 1328 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is |
| 1329 | * not allowed. |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 1330 | * |
| 1331 | * The USB core will not allow URBs to be queued to an endpoint that is being |
| 1332 | * disabled, so there's no need for mutual exclusion to protect |
| 1333 | * the xhci->devs[slot_id] structure. |
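| | * |
| | * The typical sequence, driven by the USB core on a configuration or alt |
| | * setting change, is a series of xhci_drop_endpoint()/xhci_add_endpoint() |
| | * calls followed by one check_bandwidth() call to issue the Configure |
| | * Endpoint command (or reset_bandwidth() to abandon the changes). |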
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1334 | */ |
| 1335 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
| 1336 | struct usb_host_endpoint *ep) |
| 1337 | { |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1338 | struct xhci_hcd *xhci; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1339 | struct xhci_container_ctx *in_ctx, *out_ctx; |
| 1340 | struct xhci_input_control_ctx *ctrl_ctx; |
| 1341 | struct xhci_slot_ctx *slot_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1342 | unsigned int last_ctx; |
| 1343 | unsigned int ep_index; |
| 1344 | struct xhci_ep_ctx *ep_ctx; |
| 1345 | u32 drop_flag; |
| 1346 | u32 new_add_flags, new_drop_flags, new_slot_info; |
| 1347 | int ret; |
| 1348 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1349 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1350 | if (ret <= 0) |
| 1351 | return ret; |
| 1352 | xhci = hcd_to_xhci(hcd); |
Sarah Sharp | fe6c6c1 | 2011-05-23 16:41:17 -0700 | [diff] [blame] | 1353 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1354 | return -ENODEV; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1355 | |
Sarah Sharp | fe6c6c1 | 2011-05-23 16:41:17 -0700 | [diff] [blame] | 1356 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1357 | drop_flag = xhci_get_endpoint_flag(&ep->desc); |
| 1358 | if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { |
| 1359 | xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", |
| 1360 | __func__, drop_flag); |
| 1361 | return 0; |
| 1362 | } |
| 1363 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1364 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1365 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; |
| 1366 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1367 | ep_index = xhci_get_endpoint_index(&ep->desc); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1368 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1369 | /* If the HC already knows the endpoint is disabled, |
| 1370 | * or the HCD has noted it is disabled, ignore this request |
| 1371 | */ |
Matt Evans | f5960b6 | 2011-06-01 10:22:55 +1000 | [diff] [blame] | 1372 | if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == |
| 1373 | cpu_to_le32(EP_STATE_DISABLED)) || |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1374 | le32_to_cpu(ctrl_ctx->drop_flags) & |
| 1375 | xhci_get_endpoint_flag(&ep->desc)) { |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 1376 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", |
| 1377 | __func__, ep); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1378 | return 0; |
| 1379 | } |
| 1380 | |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1381 | ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); |
| 1382 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1383 | |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1384 | ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); |
| 1385 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1386 | |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1387 | last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1388 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1389 | /* Update the last valid endpoint context, if we deleted the last one */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1390 | if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) > |
| 1391 | LAST_CTX(last_ctx)) { |
| 1392 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
| 1393 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx)); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1394 | } |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1395 | new_slot_info = le32_to_cpu(slot_ctx->dev_info); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1396 | |
| 1397 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); |
| 1398 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1399 | xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", |
| 1400 | (unsigned int) ep->desc.bEndpointAddress, |
| 1401 | udev->slot_id, |
| 1402 | (unsigned int) new_drop_flags, |
| 1403 | (unsigned int) new_add_flags, |
| 1404 | (unsigned int) new_slot_info); |
| 1405 | return 0; |
| 1406 | } |
| 1407 | |
| 1408 | /* Add an endpoint to a new possible bandwidth configuration for this device. |
| 1409 | * Only one call to this function is allowed per endpoint before |
| 1410 | * check_bandwidth() or reset_bandwidth() must be called. |
| 1411 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will |
| 1412 | * add the endpoint to the schedule with possibly new parameters denoted by a |
| 1413 | * different endpoint descriptor in usb_host_endpoint. |
| 1414 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is |
| 1415 | * not allowed. |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 1416 | * |
| 1417 | * The USB core will not allow URBs to be queued to an endpoint until the |
| 1418 | * configuration or alt setting is installed in the device, so there's no need |
| 1419 | * for mutual exclusion to protect the xhci->devs[slot_id] structure. |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1420 | */ |
| 1421 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
| 1422 | struct usb_host_endpoint *ep) |
| 1423 | { |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1424 | struct xhci_hcd *xhci; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1425 | struct xhci_container_ctx *in_ctx, *out_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1426 | unsigned int ep_index; |
| 1427 | struct xhci_ep_ctx *ep_ctx; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1428 | struct xhci_slot_ctx *slot_ctx; |
| 1429 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1430 | u32 added_ctxs; |
| 1431 | unsigned int last_ctx; |
| 1432 | u32 new_add_flags, new_drop_flags, new_slot_info; |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1433 | struct xhci_virt_device *virt_dev; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1434 | int ret = 0; |
| 1435 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1436 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 1437 | if (ret <= 0) { |
| 1438 | /* So we won't queue a reset ep command for a root hub */ |
| 1439 | ep->hcpriv = NULL; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1440 | return ret; |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 1441 | } |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1442 | xhci = hcd_to_xhci(hcd); |
Sarah Sharp | fe6c6c1 | 2011-05-23 16:41:17 -0700 | [diff] [blame] | 1443 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1444 | return -ENODEV; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1445 | |
| 1446 | added_ctxs = xhci_get_endpoint_flag(&ep->desc); |
| 1447 | last_ctx = xhci_last_valid_endpoint(added_ctxs); |
| 1448 | if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { |
| 1449 | /* FIXME when we have to issue an evaluate endpoint command to |
| 1450 | * deal with ep0 max packet size changing once we get the |
| 1451 | * descriptors |
| 1452 | */ |
| 1453 | xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", |
| 1454 | __func__, added_ctxs); |
| 1455 | return 0; |
| 1456 | } |
| 1457 | |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1458 | virt_dev = xhci->devs[udev->slot_id]; |
| 1459 | in_ctx = virt_dev->in_ctx; |
| 1460 | out_ctx = virt_dev->out_ctx; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1461 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1462 | ep_index = xhci_get_endpoint_index(&ep->desc); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1463 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1464 | |
| 1465 | /* If this endpoint is already in use, and the upper layers are trying |
| 1466 | * to add it again without dropping it, reject the addition. |
| 1467 | */ |
| 1468 | if (virt_dev->eps[ep_index].ring && |
| 1469 | !(le32_to_cpu(ctrl_ctx->drop_flags) & |
| 1470 | xhci_get_endpoint_flag(&ep->desc))) { |
| 1471 | xhci_warn(xhci, "Trying to add endpoint 0x%x " |
| 1472 | "without dropping it.\n", |
| 1473 | (unsigned int) ep->desc.bEndpointAddress); |
| 1474 | return -EINVAL; |
| 1475 | } |
| 1476 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1477 | /* If the HCD has already noted the endpoint is enabled, |
| 1478 | * ignore this request. |
| 1479 | */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1480 | if (le32_to_cpu(ctrl_ctx->add_flags) & |
| 1481 | xhci_get_endpoint_flag(&ep->desc)) { |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 1482 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", |
| 1483 | __func__, ep); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1484 | return 0; |
| 1485 | } |
| 1486 | |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 1487 | /* |
| 1488 | * Configuration and alternate setting changes must be done in |
| 1489 | * process context, not interrupt context (or so documentation |
| 1490 | * for usb_set_interface() and usb_set_configuration() claim). |
| 1491 | */ |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1492 | if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1493 | dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", |
| 1494 | __func__, ep->desc.bEndpointAddress); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1495 | return -ENOMEM; |
| 1496 | } |
| 1497 | |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1498 | ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); |
| 1499 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1500 | |
| 1501 | /* If xhci_endpoint_disable() was called for this endpoint, but the |
| 1502 | * xHC hasn't been notified yet through the check_bandwidth() call, |
| 1503 | * this re-adds a new state for the endpoint from the new endpoint |
| 1504 | * descriptors. We must drop and re-add this endpoint, so we leave the |
| 1505 | * drop flags alone. |
| 1506 | */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1507 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1508 | |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1509 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1510 | /* Update the last valid endpoint context, if we just added one past */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1511 | if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) < |
| 1512 | LAST_CTX(last_ctx)) { |
| 1513 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
| 1514 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx)); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1515 | } |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1516 | new_slot_info = le32_to_cpu(slot_ctx->dev_info); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1517 | |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 1518 | /* Store the usb_device pointer for later use */ |
| 1519 | ep->hcpriv = udev; |
| 1520 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1521 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", |
| 1522 | (unsigned int) ep->desc.bEndpointAddress, |
| 1523 | udev->slot_id, |
| 1524 | (unsigned int) new_drop_flags, |
| 1525 | (unsigned int) new_add_flags, |
| 1526 | (unsigned int) new_slot_info); |
| 1527 | return 0; |
| 1528 | } |
| 1529 | |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1530 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1531 | { |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1532 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1533 | struct xhci_ep_ctx *ep_ctx; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1534 | struct xhci_slot_ctx *slot_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1535 | int i; |
| 1536 | |
| 1537 | /* When a device's add flag and drop flag are zero, any subsequent |
| 1538 | * configure endpoint command will leave that endpoint's state |
| 1539 | * untouched. Make sure we don't leave any old state in the input |
| 1540 | * endpoint contexts. |
| 1541 | */ |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1542 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
| 1543 | ctrl_ctx->drop_flags = 0; |
| 1544 | ctrl_ctx->add_flags = 0; |
| 1545 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1546 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1547 | /* Endpoint 0 is always valid */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1548 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1549 | for (i = 1; i < 31; ++i) { |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1550 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1551 | ep_ctx->ep_info = 0; |
| 1552 | ep_ctx->ep_info2 = 0; |
Sarah Sharp | 8e595a5 | 2009-07-27 12:03:31 -0700 | [diff] [blame] | 1553 | ep_ctx->deq = 0; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1554 | ep_ctx->tx_info = 0; |
| 1555 | } |
| 1556 | } |
| 1557 | |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1558 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, |
Sarah Sharp | 00161f7 | 2011-04-28 12:23:23 -0700 | [diff] [blame] | 1559 | struct usb_device *udev, u32 *cmd_status) |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1560 | { |
| 1561 | int ret; |
| 1562 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1563 | switch (*cmd_status) { |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1564 | case COMP_ENOMEM: |
| 1565 | dev_warn(&udev->dev, "Not enough host controller resources " |
| 1566 | "for new device state.\n"); |
| 1567 | ret = -ENOMEM; |
| 1568 | /* FIXME: can we allocate more resources for the HC? */ |
| 1569 | break; |
| 1570 | case COMP_BW_ERR: |
| 1571 | dev_warn(&udev->dev, "Not enough bandwidth " |
| 1572 | "for new device state.\n"); |
| 1573 | ret = -ENOSPC; |
| 1574 | /* FIXME: can we go back to the old state? */ |
| 1575 | break; |
| 1576 | case COMP_TRB_ERR: |
| 1577 | /* the HCD set up something wrong */ |
| 1578 | dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " |
| 1579 | "add flag = 1, " |
| 1580 | "and endpoint is not disabled.\n"); |
| 1581 | ret = -EINVAL; |
| 1582 | break; |
Alex He | f6ba6fe | 2011-06-08 18:34:06 +0800 | [diff] [blame] | 1583 | case COMP_DEV_ERR: |
| 1584 | dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint " |
| 1585 | "configure command.\n"); |
| 1586 | ret = -ENODEV; |
| 1587 | break; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1588 | case COMP_SUCCESS: |
| 1589 | dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); |
| 1590 | ret = 0; |
| 1591 | break; |
| 1592 | default: |
| 1593 | xhci_err(xhci, "ERROR: unexpected command completion " |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1594 | "code 0x%x.\n", *cmd_status); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1595 | ret = -EINVAL; |
| 1596 | break; |
| 1597 | } |
| 1598 | return ret; |
| 1599 | } |
| 1600 | |
| 1601 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, |
Sarah Sharp | 00161f7 | 2011-04-28 12:23:23 -0700 | [diff] [blame] | 1602 | struct usb_device *udev, u32 *cmd_status) |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1603 | { |
| 1604 | int ret; |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1605 | struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1606 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1607 | switch (*cmd_status) { |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1608 | case COMP_EINVAL: |
| 1609 | dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate " |
| 1610 | "context command.\n"); |
| 1611 | ret = -EINVAL; |
| 1612 | break; |
| 1613 | case COMP_EBADSLT: |
| 1614 | dev_warn(&udev->dev, "WARN: slot not enabled for " |
| 1615 | "evaluate context command.\n"); |
| 1616 | case COMP_CTX_STATE: |
| 1617 | dev_warn(&udev->dev, "WARN: invalid context state for " |
| 1618 | "evaluate context command.\n"); |
| 1619 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); |
| 1620 | ret = -EINVAL; |
| 1621 | break; |
Alex He | f6ba6fe | 2011-06-08 18:34:06 +0800 | [diff] [blame] | 1622 | case COMP_DEV_ERR: |
| 1623 | dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate " |
| 1624 | "context command.\n"); |
| 1625 | ret = -ENODEV; |
| 1626 | break; |
Alex He | 1bb73a8 | 2011-05-05 18:14:12 +0800 | [diff] [blame] | 1627 | case COMP_MEL_ERR: |
| 1628 | /* Max Exit Latency too large error */ |
| 1629 | dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); |
| 1630 | ret = -EINVAL; |
| 1631 | break; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1632 | case COMP_SUCCESS: |
| 1633 | dev_dbg(&udev->dev, "Successful evaluate context command\n"); |
| 1634 | ret = 0; |
| 1635 | break; |
| 1636 | default: |
| 1637 | xhci_err(xhci, "ERROR: unexpected command completion " |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1638 | "code 0x%x.\n", *cmd_status); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1639 | ret = -EINVAL; |
| 1640 | break; |
| 1641 | } |
| 1642 | return ret; |
| 1643 | } |
| 1644 | |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 1645 | static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, |
| 1646 | struct xhci_container_ctx *in_ctx) |
| 1647 | { |
| 1648 | struct xhci_input_control_ctx *ctrl_ctx; |
| 1649 | u32 valid_add_flags; |
| 1650 | u32 valid_drop_flags; |
| 1651 | |
| 1652 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
| 1653 | /* Ignore the slot flag (bit 0), and the default control endpoint flag |
| 1654 | * (bit 1). The default control endpoint is added during the Address |
| 1655 | * Device command and is never removed until the slot is disabled. |
| 1656 | */ |
| 1657 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
| 1658 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; |
| 1659 | |
| 1660 | /* Use hweight32 to count the number of ones in the add flags, or |
| 1661 | * number of endpoints added. Don't count endpoints that are changed |
| 1662 | * (both added and dropped). |
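| | * For example (hypothetical flags): add_flags with A3 and A5 set and |
| | * drop_flags with A3 set describe one changed endpoint (A3) and one |
| | * genuinely new one (A5), so this returns 2 - 1 = 1. |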
| 1663 | */ |
| 1664 | return hweight32(valid_add_flags) - |
| 1665 | hweight32(valid_add_flags & valid_drop_flags); |
| 1666 | } |
| 1667 | |
| 1668 | static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, |
| 1669 | struct xhci_container_ctx *in_ctx) |
| 1670 | { |
| 1671 | struct xhci_input_control_ctx *ctrl_ctx; |
| 1672 | u32 valid_add_flags; |
| 1673 | u32 valid_drop_flags; |
| 1674 | |
| 1675 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
| 1676 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
| 1677 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; |
| 1678 | |
| 1679 | return hweight32(valid_drop_flags) - |
| 1680 | hweight32(valid_add_flags & valid_drop_flags); |
| 1681 | } |
| 1682 | |
| 1683 | /* |
| 1684 | * We need to reserve the new number of endpoints before the configure endpoint |
| 1685 | * command completes. We can't subtract the dropped endpoints from the number |
| 1686 | * of active endpoints until the command completes because we can oversubscribe |
| 1687 | * the host in this case: |
| 1688 | * |
| 1689 | * - the first configure endpoint command drops more endpoints than it adds |
| 1690 | * - a second configure endpoint command that adds more endpoints is queued |
| 1691 | * - the first configure endpoint command fails, so the config is unchanged |
| 1692 | * - the second command may succeed, even though there aren't enough resources |
| 1693 | * |
| 1694 | * Must be called with xhci->lock held. |
| 1695 | */ |
| 1696 | static int xhci_reserve_host_resources(struct xhci_hcd *xhci, |
| 1697 | struct xhci_container_ctx *in_ctx) |
| 1698 | { |
| 1699 | u32 added_eps; |
| 1700 | |
| 1701 | added_eps = xhci_count_num_new_endpoints(xhci, in_ctx); |
| 1702 | if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { |
| 1703 | xhci_dbg(xhci, "Not enough ep ctxs: " |
| 1704 | "%u active, need to add %u, limit is %u.\n", |
| 1705 | xhci->num_active_eps, added_eps, |
| 1706 | xhci->limit_active_eps); |
| 1707 | return -ENOMEM; |
| 1708 | } |
| 1709 | xhci->num_active_eps += added_eps; |
| 1710 | xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps, |
| 1711 | xhci->num_active_eps); |
| 1712 | return 0; |
| 1713 | } |
| 1714 | |
| 1715 | /* |
| 1716 | * The configure endpoint command was failed by the xHC for some other reason, so we |
| 1717 | * need to revert the resources that failed configuration would have used. |
| 1718 | * |
| 1719 | * Must be called with xhci->lock held. |
| 1720 | */ |
| 1721 | static void xhci_free_host_resources(struct xhci_hcd *xhci, |
| 1722 | struct xhci_container_ctx *in_ctx) |
| 1723 | { |
| 1724 | u32 num_failed_eps; |
| 1725 | |
| 1726 | num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx); |
| 1727 | xhci->num_active_eps -= num_failed_eps; |
| 1728 | xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n", |
| 1729 | num_failed_eps, |
| 1730 | xhci->num_active_eps); |
| 1731 | } |
| 1732 | |
| 1733 | /* |
| 1734 | * Now that the command has completed, clean up the active endpoint count by |
| 1735 | * subtracting out the endpoints that were dropped (but not changed). |
| 1736 | * |
| 1737 | * Must be called with xhci->lock held. |
| 1738 | */ |
| 1739 | static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, |
| 1740 | struct xhci_container_ctx *in_ctx) |
| 1741 | { |
| 1742 | u32 num_dropped_eps; |
| 1743 | |
| 1744 | num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx); |
| 1745 | xhci->num_active_eps -= num_dropped_eps; |
| 1746 | if (num_dropped_eps) |
| 1747 | xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n", |
| 1748 | num_dropped_eps, |
| 1749 | xhci->num_active_eps); |
| 1750 | } |
| 1751 | |
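| | /* A "block" is the per-speed unit the bandwidth bookkeeping below works in; |
| | * max packet sizes and max ESIT payloads are converted into blocks before |
| | * being compared against the per-bus limits. |
| | */ |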
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 1752 | unsigned int xhci_get_block_size(struct usb_device *udev) |
| 1753 | { |
| 1754 | switch (udev->speed) { |
| 1755 | case USB_SPEED_LOW: |
| 1756 | case USB_SPEED_FULL: |
| 1757 | return FS_BLOCK; |
| 1758 | case USB_SPEED_HIGH: |
| 1759 | return HS_BLOCK; |
| 1760 | case USB_SPEED_SUPER: |
| 1761 | return SS_BLOCK; |
| 1762 | case USB_SPEED_UNKNOWN: |
| 1763 | case USB_SPEED_WIRELESS: |
| 1764 | default: |
| 1765 | /* Should never happen */ |
| 1766 | return 1; |
| 1767 | } |
| 1768 | } |
| 1769 | |
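| | /* Return the largest per-packet overhead (in blocks) among the endpoint |
| | * types present in this interval, checking LS, then FS; an interval with |
| | * no LS or FS endpoints is charged the high-speed overhead. |
| | */ |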
| 1770 | unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) |
| 1771 | { |
| 1772 | if (interval_bw->overhead[LS_OVERHEAD_TYPE]) |
| 1773 | return LS_OVERHEAD; |
| 1774 | if (interval_bw->overhead[FS_OVERHEAD_TYPE]) |
| 1775 | return FS_OVERHEAD; |
| 1776 | return HS_OVERHEAD; |
| 1777 | } |
| 1778 | |
| 1779 | /* If we are changing a LS/FS device under a HS hub, |
| 1780 | * make sure (if we are activating a new TT) that the HS bus has enough |
| 1781 | * bandwidth for this new TT. |
| 1782 | */ |
| 1783 | static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, |
| 1784 | struct xhci_virt_device *virt_dev, |
| 1785 | int old_active_eps) |
| 1786 | { |
| 1787 | struct xhci_interval_bw_table *bw_table; |
| 1788 | struct xhci_tt_bw_info *tt_info; |
| 1789 | |
| 1790 | /* Find the bandwidth table for the root port this TT is attached to. */ |
| 1791 | bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; |
| 1792 | tt_info = virt_dev->tt_info; |
| 1793 | /* If this TT already had active endpoints, the bandwidth for this TT |
| 1794 | * has already been added. Removing all periodic endpoints (and thus |
| 1795 | * making the TT inactive) will only decrease the bandwidth used. |
| 1796 | */ |
| 1797 | if (old_active_eps) |
| 1798 | return 0; |
| 1799 | if (old_active_eps == 0 && tt_info->active_eps != 0) { |
| 1800 | if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) |
| 1801 | return -ENOMEM; |
| 1802 | return 0; |
| 1803 | } |
| 1804 | /* Not sure why we would have no new active endpoints... |
| 1805 | * |
| 1806 | * Maybe because of an Evaluate Context change for a hub update or a |
| 1807 | * control endpoint 0 max packet size change? |
| 1808 | * FIXME: skip the bandwidth calculation in that case. |
| 1809 | */ |
| 1810 | return 0; |
| 1811 | } |
| 1812 | |
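| | /* SuperSpeed bandwidth is tracked per direction. Reserve SS_BW_RESERVED |
| | * percent of each direction's limit and check that the periodic totals |
| | * fit in what remains. |
| | */ |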
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame^] | 1813 | static int xhci_check_ss_bw(struct xhci_hcd *xhci, |
| 1814 | struct xhci_virt_device *virt_dev) |
| 1815 | { |
| 1816 | unsigned int bw_reserved; |
| 1817 | |
| 1818 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); |
| 1819 | if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) |
| 1820 | return -ENOMEM; |
| 1821 | |
| 1822 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); |
| 1823 | if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) |
| 1824 | return -ENOMEM; |
| 1825 | |
| 1826 | return 0; |
| 1827 | } |
| 1828 | |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 1829 | /* |
| 1830 | * This algorithm is a very conservative estimate of the worst-case scheduling |
| 1831 | * scenario for any one interval. The hardware dynamically schedules the |
| 1832 | * packets, so we can't tell which microframe could be the limiting factor in |
| 1833 | * the bandwidth scheduling. This only takes into account periodic endpoints. |
| 1834 | * |
| 1835 | * Obviously, we can't solve an NP complete problem to find the minimum worst |
| 1836 | * case scenario. Instead, we come up with an estimate that is no less than |
| 1837 | * the worst case bandwidth used for any one microframe, but may be an |
| 1838 | * over-estimate. |
| 1839 | * |
| 1840 | * We walk the requirements for each endpoint by interval, starting with the |
| 1841 | * smallest interval, and place packets in the schedule where there is only one |
| 1842 | * possible way to schedule packets for that interval. In order to simplify |
| 1843 | * this algorithm, we record the largest max packet size for each interval, and |
| 1844 | * assume all packets will be that size. |
| 1845 | * |
| 1846 | * For interval 0, we obviously must schedule all packets in every microframe. |
| 1847 | * The bandwidth for interval 0 is just the amount of data to be transmitted |
| 1848 | * (the sum of all max ESIT payload sizes, plus any overhead per packet times |
| 1849 | * the number of packets). |
| 1850 | * |
| 1851 | * For interval 1, we have two possible microframes to schedule those packets |
| 1852 | * in. For this algorithm, if we can schedule the same number of packets for |
| 1853 | * each possible scheduling opportunity (each microframe), we will do so. The |
| 1854 | * remaining number of packets will be saved to be transmitted in the gaps in |
| 1855 | * the next interval's scheduling sequence. |
| 1856 | * |
| 1857 | * As we move those remaining packets to be scheduled with interval 2 packets, |
| 1858 | * we have to double the number of remaining packets to transmit. This is |
| 1859 | * because the intervals are actually powers of 2, and we would be transmitting |
| 1860 | * the previous interval's packets twice in this interval. We also have to be |
| 1861 | * sure that when we look at the largest max packet size for this interval, we |
| 1862 | * also look at the largest max packet size for the remaining packets and take |
| 1863 | * the greater of the two. |
| 1864 | * |
| 1865 | * The algorithm continues to evenly distribute packets in each scheduling |
| 1866 | * opportunity, and push the remaining packets out, until we get to the last |
| 1867 | * interval. Then those packets and their associated overhead are just added |
| 1868 | * to the bandwidth used. |
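| | * |
| | * For example (illustrative numbers only): if 3 packets from interval-1 |
| | * endpoints could not be scheduled evenly, the interval-2 pass starts with |
| | * 2 * 3 = 6 carried-over packets on top of whatever the interval-2 |
| | * endpoints themselves require, since those packets recur twice within the |
| | * longer interval. |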
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 1869 | */ |
| 1870 | static int xhci_check_bw_table(struct xhci_hcd *xhci, |
| 1871 | struct xhci_virt_device *virt_dev, |
| 1872 | int old_active_eps) |
| 1873 | { |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 1874 | unsigned int bw_reserved; |
| 1875 | unsigned int max_bandwidth; |
| 1876 | unsigned int bw_used; |
| 1877 | unsigned int block_size; |
| 1878 | struct xhci_interval_bw_table *bw_table; |
| 1879 | unsigned int packet_size = 0; |
| 1880 | unsigned int overhead = 0; |
| 1881 | unsigned int packets_transmitted = 0; |
| 1882 | unsigned int packets_remaining = 0; |
| 1883 | unsigned int i; |
| 1884 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame^] | 1885 | if (virt_dev->udev->speed == USB_SPEED_SUPER) |
| 1886 | return xhci_check_ss_bw(xhci, virt_dev); |
| 1887 | |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 1888 | if (virt_dev->udev->speed == USB_SPEED_HIGH) { |
| 1889 | max_bandwidth = HS_BW_LIMIT; |
| 1890 | /* Convert percent of bus BW reserved to blocks reserved */ |
| 1891 | bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); |
| 1892 | } else { |
| 1893 | max_bandwidth = FS_BW_LIMIT; |
| 1894 | bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); |
| 1895 | } |
| 1896 | |
| 1897 | bw_table = virt_dev->bw_table; |
| 1898 | /* We need to translate the max packet size and max ESIT payloads into |
| 1899 | * the units the hardware uses. |
| 1900 | */ |
| 1901 | block_size = xhci_get_block_size(virt_dev->udev); |
| 1902 | |
| 1903 | /* If we are manipulating a LS/FS device under a HS hub, double check |
| 1904 | * that the HS bus has enough bandwidth if we are activating a new TT. |
| 1905 | */ |
| 1906 | if (virt_dev->tt_info) { |
| 1907 | xhci_dbg(xhci, "Recalculating BW for rootport %u\n", |
| 1908 | virt_dev->real_port); |
| 1909 | if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { |
| 1910 | xhci_warn(xhci, "Not enough bandwidth on HS bus for " |
| 1911 | "newly activated TT.\n"); |
| 1912 | return -ENOMEM; |
| 1913 | } |
| 1914 | xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n", |
| 1915 | virt_dev->tt_info->slot_id, |
| 1916 | virt_dev->tt_info->ttport); |
| 1917 | } else { |
| 1918 | xhci_dbg(xhci, "Recalculating BW for rootport %u\n", |
| 1919 | virt_dev->real_port); |
| 1920 | } |
| 1921 | |
| 1922 | /* Add in how much bandwidth will be used for interval zero, or the |
| 1923 | * rounded max ESIT payload + number of packets * largest overhead. |
| 1924 | */ |
| 1925 | bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + |
| 1926 | bw_table->interval_bw[0].num_packets * |
| 1927 | xhci_get_largest_overhead(&bw_table->interval_bw[0]); |
| 1928 | |
| 1929 | for (i = 1; i < XHCI_MAX_INTERVAL; i++) { |
| 1930 | unsigned int bw_added; |
| 1931 | unsigned int largest_mps; |
| 1932 | unsigned int interval_overhead; |
| 1933 | |
| 1934 | /* |
| 1935 | * How many packets could we transmit in this interval? |
| 1936 | * If packets didn't fit in the previous interval, we will need |
| 1937 | * to transmit that many packets twice within this interval. |
| 1938 | */ |
| 1939 | packets_remaining = 2 * packets_remaining + |
| 1940 | bw_table->interval_bw[i].num_packets; |
| 1941 | |
| 1942 | /* Find the largest max packet size of this or the previous |
| 1943 | * interval. |
| 1944 | */ |
| 1945 | if (list_empty(&bw_table->interval_bw[i].endpoints)) |
| 1946 | largest_mps = 0; |
| 1947 | else { |
| 1948 | struct xhci_virt_ep *virt_ep; |
| 1949 | struct list_head *ep_entry; |
| 1950 | |
| 1951 | ep_entry = bw_table->interval_bw[i].endpoints.next; |
| 1952 | virt_ep = list_entry(ep_entry, |
| 1953 | struct xhci_virt_ep, bw_endpoint_list); |
| 1954 | /* Convert to blocks, rounding up */ |
| 1955 | largest_mps = DIV_ROUND_UP( |
| 1956 | virt_ep->bw_info.max_packet_size, |
| 1957 | block_size); |
| 1958 | } |
| 1959 | if (largest_mps > packet_size) |
| 1960 | packet_size = largest_mps; |
| 1961 | |
| 1962 | /* Use the larger overhead of this or the previous interval. */ |
| 1963 | interval_overhead = xhci_get_largest_overhead( |
| 1964 | &bw_table->interval_bw[i]); |
| 1965 | if (interval_overhead > overhead) |
| 1966 | overhead = interval_overhead; |
| 1967 | |
| 1968 | /* How many packets can we evenly distribute across |
| 1969 | * (1 << (i + 1)) possible scheduling opportunities? |
| 1970 | */ |
| 1971 | packets_transmitted = packets_remaining >> (i + 1); |
| 1972 | |
| 1973 | /* Add in the bandwidth used for those scheduled packets */ |
| 1974 | bw_added = packets_transmitted * (overhead + packet_size); |
| 1975 | |
| 1976 | /* How many packets do we have remaining to transmit? */ |
| 1977 | packets_remaining = packets_remaining % (1 << (i + 1)); |
| 1978 | |
| 1979 | /* What largest max packet size should those packets have? */ |
| 1980 | /* If we've transmitted all packets, don't carry over the |
| 1981 | * largest packet size. |
| 1982 | */ |
| 1983 | if (packets_remaining == 0) { |
| 1984 | packet_size = 0; |
| 1985 | overhead = 0; |
| 1986 | } else if (packets_transmitted > 0) { |
| 1987 | /* Otherwise if we do have remaining packets, and we've |
| 1988 | * scheduled some packets in this interval, take the |
| 1989 | * largest max packet size from endpoints with this |
| 1990 | * interval. |
| 1991 | */ |
| 1992 | packet_size = largest_mps; |
| 1993 | overhead = interval_overhead; |
| 1994 | } |
| 1995 | /* Otherwise carry over packet_size and overhead from the last |
| 1996 | * time we had a remainder. |
| 1997 | */ |
| 1998 | bw_used += bw_added; |
| 1999 | if (bw_used > max_bandwidth) { |
| 2000 | xhci_warn(xhci, "Not enough bandwidth. " |
| 2001 | "Proposed: %u, Max: %u\n", |
| 2002 | bw_used, max_bandwidth); |
| 2003 | return -ENOMEM; |
| 2004 | } |
| 2005 | } |
| 2006 | /* |
| 2007 | * Ok, we know we have some packets left over after even-handedly |
| 2008 | * scheduling interval 15. We don't know which microframes they will |
| 2009 | * fit into, so we over-schedule and say they will be scheduled every |
| 2010 | * microframe. |
| 2011 | */ |
| 2012 | if (packets_remaining > 0) |
| 2013 | bw_used += overhead + packet_size; |
| 2014 | |
| 2015 | if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { |
| 2016 | unsigned int port_index = virt_dev->real_port - 1; |
| 2017 | |
| 2018 | /* OK, we're manipulating a HS device attached to a |
| 2019 | * root port bandwidth domain. Include the number of active TTs |
| 2020 | * in the bandwidth used. |
| 2021 | */ |
| 2022 | bw_used += TT_HS_OVERHEAD * |
| 2023 | xhci->rh_bw[port_index].num_active_tts; |
| 2024 | } |
| 2025 | |
| 2026 | xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, " |
| 2027 | "Available: %u " "percent\n", |
| 2028 | bw_used, max_bandwidth, bw_reserved, |
| 2029 | (max_bandwidth - bw_used - bw_reserved) * 100 / |
| 2030 | max_bandwidth); |
| 2031 | |
| 2032 | bw_used += bw_reserved; |
| 2033 | if (bw_used > max_bandwidth) { |
| 2034 | xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", |
| 2035 | bw_used, max_bandwidth); |
| 2036 | return -ENOMEM; |
| 2037 | } |
| 2038 | |
| 2039 | bw_table->bw_used = bw_used; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2040 | return 0; |
| 2041 | } |
| 2042 | |
| 2043 | static bool xhci_is_async_ep(unsigned int ep_type) |
| 2044 | { |
| 2045 | return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && |
| 2046 | ep_type != ISOC_IN_EP && |
| 2047 | ep_type != INT_IN_EP); |
| 2048 | } |
| 2049 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame^] | 2050 | static bool xhci_is_sync_in_ep(unsigned int ep_type) |
| 2051 | { |
| 2052 | return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); |
| 2053 | } |
| 2054 | |
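| | /* Estimate the SuperSpeed bandwidth (in blocks) an endpoint consumes: |
| | * mult * num_packets packets of max_packet_size (converted to blocks) plus |
| | * protocol overhead, averaged over the 2^ep_interval microframe service |
| | * interval. |
| | */ |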
| 2055 | static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) |
| 2056 | { |
| 2057 | unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); |
| 2058 | |
| 2059 | if (ep_bw->ep_interval == 0) |
| 2060 | return SS_OVERHEAD_BURST + |
| 2061 | (ep_bw->mult * ep_bw->num_packets * |
| 2062 | (SS_OVERHEAD + mps)); |
| 2063 | return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * |
| 2064 | (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), |
| 2065 | 1 << ep_bw->ep_interval); |
| 2066 | |
| 2067 | } |
| 2068 | |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2069 | void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, |
| 2070 | struct xhci_bw_info *ep_bw, |
| 2071 | struct xhci_interval_bw_table *bw_table, |
| 2072 | struct usb_device *udev, |
| 2073 | struct xhci_virt_ep *virt_ep, |
| 2074 | struct xhci_tt_bw_info *tt_info) |
| 2075 | { |
| 2076 | struct xhci_interval_bw *interval_bw; |
| 2077 | int normalized_interval; |
| 2078 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame^] | 2079 | if (xhci_is_async_ep(ep_bw->type)) |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2080 | return; |
| 2081 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame^] | 2082 | if (udev->speed == USB_SPEED_SUPER) { |
| 2083 | if (xhci_is_sync_in_ep(ep_bw->type)) |
| 2084 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= |
| 2085 | xhci_get_ss_bw_consumed(ep_bw); |
| 2086 | else |
| 2087 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= |
| 2088 | xhci_get_ss_bw_consumed(ep_bw); |
| 2089 | return; |
| 2090 | } |
| 2091 | |
| 2092 | /* SuperSpeed endpoints never get added to intervals in the table, so |
| 2093 | * this check is only valid for HS/FS/LS devices. |
| 2094 | */ |
| 2095 | if (list_empty(&virt_ep->bw_endpoint_list)) |
| 2096 | return; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2097 | /* For LS/FS devices, we need to translate the interval expressed in |
| 2098 | * microframes to frames. |
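| | * E.g. an ep_interval of 3 (2^3 = 8 microframes = 1 frame) maps to |
| | * interval_bw[0], the once-per-frame bucket. |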
| 2099 | */ |
| 2100 | if (udev->speed == USB_SPEED_HIGH) |
| 2101 | normalized_interval = ep_bw->ep_interval; |
| 2102 | else |
| 2103 | normalized_interval = ep_bw->ep_interval - 3; |
| 2104 | |
| 2105 | if (normalized_interval == 0) |
| 2106 | bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; |
| 2107 | interval_bw = &bw_table->interval_bw[normalized_interval]; |
| 2108 | interval_bw->num_packets -= ep_bw->num_packets; |
| 2109 | switch (udev->speed) { |
| 2110 | case USB_SPEED_LOW: |
| 2111 | interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; |
| 2112 | break; |
| 2113 | case USB_SPEED_FULL: |
| 2114 | interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; |
| 2115 | break; |
| 2116 | case USB_SPEED_HIGH: |
| 2117 | interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; |
| 2118 | break; |
| 2119 | case USB_SPEED_SUPER: |
| 2120 | case USB_SPEED_UNKNOWN: |
| 2121 | case USB_SPEED_WIRELESS: |
| 2122 | /* Should never happen because only LS/FS/HS endpoints will get |
| 2123 | * added to the endpoint list. |
| 2124 | */ |
| 2125 | return; |
| 2126 | } |
| 2127 | if (tt_info) |
| 2128 | tt_info->active_eps -= 1; |
| 2129 | list_del_init(&virt_ep->bw_endpoint_list); |
| 2130 | } |
| 2131 | |
| 2132 | static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, |
| 2133 | struct xhci_bw_info *ep_bw, |
| 2134 | struct xhci_interval_bw_table *bw_table, |
| 2135 | struct usb_device *udev, |
| 2136 | struct xhci_virt_ep *virt_ep, |
| 2137 | struct xhci_tt_bw_info *tt_info) |
| 2138 | { |
| 2139 | struct xhci_interval_bw *interval_bw; |
| 2140 | struct xhci_virt_ep *smaller_ep; |
| 2141 | int normalized_interval; |
| 2142 | |
| 2143 | if (xhci_is_async_ep(ep_bw->type)) |
| 2144 | return; |
| 2145 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame^] | 2146 | if (udev->speed == USB_SPEED_SUPER) { |
| 2147 | if (xhci_is_sync_in_ep(ep_bw->type)) |
| 2148 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in += |
| 2149 | xhci_get_ss_bw_consumed(ep_bw); |
| 2150 | else |
| 2151 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out += |
| 2152 | xhci_get_ss_bw_consumed(ep_bw); |
| 2153 | return; |
| 2154 | } |
| 2155 | |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2156 | /* For LS/FS devices, we need to translate the interval expressed in |
| 2157 | * microframes to frames. |
| 2158 | */ |
| 2159 | if (udev->speed == USB_SPEED_HIGH) |
| 2160 | normalized_interval = ep_bw->ep_interval; |
| 2161 | else |
| 2162 | normalized_interval = ep_bw->ep_interval - 3; |
| 2163 | |
| 2164 | if (normalized_interval == 0) |
| 2165 | bw_table->interval0_esit_payload += ep_bw->max_esit_payload; |
| 2166 | interval_bw = &bw_table->interval_bw[normalized_interval]; |
| 2167 | interval_bw->num_packets += ep_bw->num_packets; |
| 2168 | switch (udev->speed) { |
| 2169 | case USB_SPEED_LOW: |
| 2170 | interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; |
| 2171 | break; |
| 2172 | case USB_SPEED_FULL: |
| 2173 | interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; |
| 2174 | break; |
| 2175 | case USB_SPEED_HIGH: |
| 2176 | interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; |
| 2177 | break; |
| 2178 | case USB_SPEED_SUPER: |
| 2179 | case USB_SPEED_UNKNOWN: |
| 2180 | case USB_SPEED_WIRELESS: |
| 2181 | /* Should never happen because only LS/FS/HS endpoints will get |
| 2182 | * added to the endpoint list. |
| 2183 | */ |
| 2184 | return; |
| 2185 | } |
| 2186 | |
| 2187 | if (tt_info) |
| 2188 | tt_info->active_eps += 1; |
| 2189 | /* Insert the endpoint into the list, largest max packet size first. */ |
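 /* Illustrative values only: if the interval already holds endpoints with
  * max packet sizes 512 and 64, a new endpoint with max packet size 192 is
  * linked in between them (512, 192, 64), so the bandwidth check always
  * walks the costliest packets first.
  */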
| 2190 | list_for_each_entry(smaller_ep, &interval_bw->endpoints, |
| 2191 | bw_endpoint_list) { |
| 2192 | if (ep_bw->max_packet_size >= |
| 2193 | smaller_ep->bw_info.max_packet_size) { |
| 2194 | /* Add the new ep before the smaller endpoint */ |
| 2195 | list_add_tail(&virt_ep->bw_endpoint_list, |
| 2196 | &smaller_ep->bw_endpoint_list); |
| 2197 | return; |
| 2198 | } |
| 2199 | } |
| 2200 | /* Add the new endpoint at the end of the list. */ |
| 2201 | list_add_tail(&virt_ep->bw_endpoint_list, |
| 2202 | &interval_bw->endpoints); |
| 2203 | } |
| 2204 | |
| 2205 | void xhci_update_tt_active_eps(struct xhci_hcd *xhci, |
| 2206 | struct xhci_virt_device *virt_dev, |
| 2207 | int old_active_eps) |
| 2208 | { |
| 2209 | struct xhci_root_port_bw_info *rh_bw_info; |
| 2210 | if (!virt_dev->tt_info) |
| 2211 | return; |
| 2212 | |
| 2213 | rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; |
| 2214 | if (old_active_eps == 0 && |
| 2215 | virt_dev->tt_info->active_eps != 0) { |
| 2216 | rh_bw_info->num_active_tts += 1; |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2217 | rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2218 | } else if (old_active_eps != 0 && |
| 2219 | virt_dev->tt_info->active_eps == 0) { |
| 2220 | rh_bw_info->num_active_tts -= 1; |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2221 | rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2222 | } |
| 2223 | } |
| 2224 | |
| 2225 | static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, |
| 2226 | struct xhci_virt_device *virt_dev, |
| 2227 | struct xhci_container_ctx *in_ctx) |
| 2228 | { |
| 2229 | struct xhci_bw_info ep_bw_info[31]; |
| 2230 | int i; |
| 2231 | struct xhci_input_control_ctx *ctrl_ctx; |
| 2232 | int old_active_eps = 0; |
| 2233 | |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2234 | if (virt_dev->tt_info) |
| 2235 | old_active_eps = virt_dev->tt_info->active_eps; |
| 2236 | |
| 2237 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
| 2238 | |
| 2239 | for (i = 0; i < 31; i++) { |
| 2240 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) |
| 2241 | continue; |
| 2242 | |
| 2243 | /* Make a copy of the BW info in case we need to revert this */ |
| 2244 | memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, |
| 2245 | sizeof(ep_bw_info[i])); |
| 2246 | /* Drop the endpoint from the interval table if the endpoint is |
| 2247 | * being dropped or changed. |
| 2248 | */ |
| 2249 | if (EP_IS_DROPPED(ctrl_ctx, i)) |
| 2250 | xhci_drop_ep_from_interval_table(xhci, |
| 2251 | &virt_dev->eps[i].bw_info, |
| 2252 | virt_dev->bw_table, |
| 2253 | virt_dev->udev, |
| 2254 | &virt_dev->eps[i], |
| 2255 | virt_dev->tt_info); |
| 2256 | } |
| 2257 | /* Overwrite the information stored in the endpoints' bw_info */ |
| 2258 | xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); |
| 2259 | for (i = 0; i < 31; i++) { |
| 2260 | /* Add any changed or added endpoints to the interval table */ |
| 2261 | if (EP_IS_ADDED(ctrl_ctx, i)) |
| 2262 | xhci_add_ep_to_interval_table(xhci, |
| 2263 | &virt_dev->eps[i].bw_info, |
| 2264 | virt_dev->bw_table, |
| 2265 | virt_dev->udev, |
| 2266 | &virt_dev->eps[i], |
| 2267 | virt_dev->tt_info); |
| 2268 | } |
| 2269 | |
| 2270 | if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { |
| 2271 | /* Ok, this fits in the bandwidth we have. |
| 2272 | * Update the number of active TTs. |
| 2273 | */ |
| 2274 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); |
| 2275 | return 0; |
| 2276 | } |
| 2277 | |
| 2278 | /* We don't have enough bandwidth for this, revert the stored info. */ |
| 2279 | for (i = 0; i < 31; i++) { |
| 2280 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) |
| 2281 | continue; |
| 2282 | |
| 2283 | /* Drop the new copies of any added or changed endpoints from |
| 2284 | * the interval table. |
| 2285 | */ |
| 2286 | if (EP_IS_ADDED(ctrl_ctx, i)) { |
| 2287 | xhci_drop_ep_from_interval_table(xhci, |
| 2288 | &virt_dev->eps[i].bw_info, |
| 2289 | virt_dev->bw_table, |
| 2290 | virt_dev->udev, |
| 2291 | &virt_dev->eps[i], |
| 2292 | virt_dev->tt_info); |
| 2293 | } |
| 2294 | /* Revert the endpoint back to its old information */ |
| 2295 | memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], |
| 2296 | sizeof(ep_bw_info[i])); |
| 2297 | /* Add any changed or dropped endpoints back into the table */ |
| 2298 | if (EP_IS_DROPPED(ctrl_ctx, i)) |
| 2299 | xhci_add_ep_to_interval_table(xhci, |
| 2300 | &virt_dev->eps[i].bw_info, |
| 2301 | virt_dev->bw_table, |
| 2302 | virt_dev->udev, |
| 2303 | &virt_dev->eps[i], |
| 2304 | virt_dev->tt_info); |
| 2305 | } |
| 2306 | return -ENOMEM; |
| 2307 | } |
| 2308 | |
| 2309 | |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2310 | /* Issue a configure endpoint command or evaluate context command |
| 2311 | * and wait for it to finish. |
| 2312 | */ |
| 2313 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2314 | struct usb_device *udev, |
| 2315 | struct xhci_command *command, |
| 2316 | bool ctx_change, bool must_succeed) |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2317 | { |
| 2318 | int ret; |
| 2319 | int timeleft; |
| 2320 | unsigned long flags; |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2321 | struct xhci_container_ctx *in_ctx; |
| 2322 | struct completion *cmd_completion; |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2323 | u32 *cmd_status; |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2324 | struct xhci_virt_device *virt_dev; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2325 | |
| 2326 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2327 | virt_dev = xhci->devs[udev->slot_id]; |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2328 | |
Sarah Sharp | 750645f | 2011-09-02 11:05:43 -0700 | [diff] [blame] | 2329 | if (command) |
| 2330 | in_ctx = command->in_ctx; |
| 2331 | else |
| 2332 | in_ctx = virt_dev->in_ctx; |
| 2333 | |
| 2334 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && |
| 2335 | xhci_reserve_host_resources(xhci, in_ctx)) { |
| 2336 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2337 | xhci_warn(xhci, "Not enough host resources, " |
| 2338 | "active endpoint contexts = %u\n", |
| 2339 | xhci->num_active_eps); |
| 2340 | return -ENOMEM; |
| 2341 | } |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2342 | if ((xhci->quirks & XHCI_SW_BW_CHECKING) && |
| 2343 | xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) { |
| 2344 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
| 2345 | xhci_free_host_resources(xhci, in_ctx); |
| 2346 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2347 | xhci_warn(xhci, "Not enough bandwidth\n"); |
| 2348 | return -ENOMEM; |
| 2349 | } |
Sarah Sharp | 750645f | 2011-09-02 11:05:43 -0700 | [diff] [blame] | 2350 | |
| 2351 | if (command) { |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2352 | cmd_completion = command->completion; |
| 2353 | cmd_status = &command->status; |
| 2354 | command->command_trb = xhci->cmd_ring->enqueue; |
Paul Zimmerman | 7a3783e | 2010-11-17 16:26:50 -0800 | [diff] [blame] | 2355 | |
| 2356 | /* Enqueue pointer can be left pointing to the link TRB, |
| 2357 | * we must handle that |
| 2358 | */ |
Matt Evans | f5960b6 | 2011-06-01 10:22:55 +1000 | [diff] [blame] | 2359 | if (TRB_TYPE_LINK_LE32(command->command_trb->link.control)) |
Paul Zimmerman | 7a3783e | 2010-11-17 16:26:50 -0800 | [diff] [blame] | 2360 | command->command_trb = |
| 2361 | xhci->cmd_ring->enq_seg->next->trbs; |
| 2362 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2363 | list_add_tail(&command->cmd_list, &virt_dev->cmd_list); |
| 2364 | } else { |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2365 | cmd_completion = &virt_dev->cmd_completion; |
| 2366 | cmd_status = &virt_dev->cmd_status; |
| 2367 | } |
Andiry Xu | 1d68064 | 2010-03-12 17:10:04 +0800 | [diff] [blame] | 2368 | init_completion(cmd_completion); |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2369 | |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2370 | if (!ctx_change) |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2371 | ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, |
| 2372 | udev->slot_id, must_succeed); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2373 | else |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2374 | ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2375 | udev->slot_id); |
| 2376 | if (ret < 0) { |
Sarah Sharp | c01591b | 2009-12-09 15:58:58 -0800 | [diff] [blame] | 2377 | if (command) |
| 2378 | list_del(&command->cmd_list); |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2379 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
| 2380 | xhci_free_host_resources(xhci, in_ctx); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2381 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2382 | xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); |
| 2383 | return -ENOMEM; |
| 2384 | } |
| 2385 | xhci_ring_cmd_db(xhci); |
| 2386 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2387 | |
| 2388 | /* Wait for the configure endpoint command to complete */ |
| 2389 | timeleft = wait_for_completion_interruptible_timeout( |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2390 | cmd_completion, |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2391 | USB_CTRL_SET_TIMEOUT); |
| 2392 | if (timeleft <= 0) { |
| 2393 | xhci_warn(xhci, "%s while waiting for %s command\n", |
| 2394 | timeleft == 0 ? "Timeout" : "Signal", |
| 2395 | ctx_change == 0 ? |
| 2396 | "configure endpoint" : |
| 2397 | "evaluate context"); |
| 2398 | /* FIXME cancel the configure endpoint command */ |
| 2399 | return -ETIME; |
| 2400 | } |
| 2401 | |
| 2402 | if (!ctx_change) |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2403 | ret = xhci_configure_endpoint_result(xhci, udev, cmd_status); |
| 2404 | else |
| 2405 | ret = xhci_evaluate_context_result(xhci, udev, cmd_status); |
| 2406 | |
| 2407 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
| 2408 | spin_lock_irqsave(&xhci->lock, flags); |
| 2409 | /* If the command failed, remove the reserved resources. |
| 2410 | * Otherwise, clean up the estimate to include dropped eps. |
| 2411 | */ |
| 2412 | if (ret) |
| 2413 | xhci_free_host_resources(xhci, in_ctx); |
| 2414 | else |
| 2415 | xhci_finish_resource_reservation(xhci, in_ctx); |
| 2416 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2417 | } |
| 2418 | return ret; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2419 | } |
| 2420 | |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 2421 | /* Called after one or more calls to xhci_add_endpoint() or |
| 2422 | * xhci_drop_endpoint(). If this call fails, the USB core is expected |
| 2423 | * to call xhci_reset_bandwidth(). |
| 2424 | * |
| 2425 | * Since we are in the middle of changing either configuration or |
| 2426 | * installing a new alt setting, the USB core won't allow URBs to be |
| 2427 | * enqueued for any endpoint on the old config or interface. Nothing |
| 2428 | * else should be touching the xhci->devs[slot_id] structure, so we |
| 2429 | * don't need to take the xhci->lock for manipulating that. |
| 2430 | */ |
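/*
 * Rough shape of the calling sequence in the USB core's bandwidth allocation
 * path (a hedged sketch, not the literal core code):
 *
 *	for each changed endpoint:
 *		hcd->driver->drop_endpoint() / add_endpoint()
 *	if (hcd->driver->check_bandwidth(hcd, udev))
 *		hcd->driver->reset_bandwidth(hcd, udev);
 *
 * so anything set up here must be safe to discard in xhci_reset_bandwidth()
 * when the command fails.
 */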
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2431 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
| 2432 | { |
| 2433 | int i; |
| 2434 | int ret = 0; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2435 | struct xhci_hcd *xhci; |
| 2436 | struct xhci_virt_device *virt_dev; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2437 | struct xhci_input_control_ctx *ctrl_ctx; |
| 2438 | struct xhci_slot_ctx *slot_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2439 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 2440 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2441 | if (ret <= 0) |
| 2442 | return ret; |
| 2443 | xhci = hcd_to_xhci(hcd); |
Sarah Sharp | fe6c6c1 | 2011-05-23 16:41:17 -0700 | [diff] [blame] | 2444 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 2445 | return -ENODEV; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2446 | |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 2447 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2448 | virt_dev = xhci->devs[udev->slot_id]; |
| 2449 | |
| 2450 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2451 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2452 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
| 2453 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); |
| 2454 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); |
Sarah Sharp | 2dc3753 | 2011-09-02 11:05:40 -0700 | [diff] [blame] | 2455 | |
| 2456 | /* Don't issue the command if there are no endpoints to update. */ |
| 2457 | if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && |
| 2458 | ctrl_ctx->drop_flags == 0) |
| 2459 | return 0; |
| 2460 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2461 | xhci_dbg(xhci, "New Input Control Context:\n"); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2462 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
| 2463 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2464 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2465 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2466 | ret = xhci_configure_endpoint(xhci, udev, NULL, |
| 2467 | false, false); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2468 | if (ret) { |
| 2469 | /* Caller should call xhci_reset_bandwidth() */ |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2470 | return ret; |
| 2471 | } |
| 2472 | |
| 2473 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2474 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2475 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2476 | |
Sarah Sharp | 834cb0f | 2011-05-12 18:06:37 -0700 | [diff] [blame] | 2477 | /* Free any rings that were dropped, but not changed. */ |
| 2478 | for (i = 1; i < 31; ++i) { |
Matt Evans | 4819fef | 2011-06-01 13:01:07 +1000 | [diff] [blame] | 2479 | if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && |
| 2480 | !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) |
Sarah Sharp | 834cb0f | 2011-05-12 18:06:37 -0700 | [diff] [blame] | 2481 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); |
| 2482 | } |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2483 | xhci_zero_in_ctx(xhci, virt_dev); |
Sarah Sharp | 834cb0f | 2011-05-12 18:06:37 -0700 | [diff] [blame] | 2484 | /* |
| 2485 | * Install any rings for completely new endpoints or changed endpoints, |
| 2486 | * and free or cache any old rings from changed endpoints. |
| 2487 | */ |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2488 | for (i = 1; i < 31; ++i) { |
Sarah Sharp | 74f9fe2 | 2009-12-03 09:44:29 -0800 | [diff] [blame] | 2489 | if (!virt_dev->eps[i].new_ring) |
| 2490 | continue; |
| 2491 | /* Only cache or free the old ring if it exists. |
| 2492 | * It may not if this is the first add of an endpoint. |
| 2493 | */ |
| 2494 | if (virt_dev->eps[i].ring) { |
Sarah Sharp | 412566b | 2009-12-09 15:59:01 -0800 | [diff] [blame] | 2495 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2496 | } |
Sarah Sharp | 74f9fe2 | 2009-12-03 09:44:29 -0800 | [diff] [blame] | 2497 | virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; |
| 2498 | virt_dev->eps[i].new_ring = NULL; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2499 | } |
| 2500 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2501 | return ret; |
| 2502 | } |
| 2503 | |
| 2504 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
| 2505 | { |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2506 | struct xhci_hcd *xhci; |
| 2507 | struct xhci_virt_device *virt_dev; |
| 2508 | int i, ret; |
| 2509 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 2510 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2511 | if (ret <= 0) |
| 2512 | return; |
| 2513 | xhci = hcd_to_xhci(hcd); |
| 2514 | |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 2515 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2516 | virt_dev = xhci->devs[udev->slot_id]; |
| 2517 | /* Free any rings allocated for added endpoints */ |
| 2518 | for (i = 0; i < 31; ++i) { |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2519 | if (virt_dev->eps[i].new_ring) { |
| 2520 | xhci_ring_free(xhci, virt_dev->eps[i].new_ring); |
| 2521 | virt_dev->eps[i].new_ring = NULL; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2522 | } |
| 2523 | } |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2524 | xhci_zero_in_ctx(xhci, virt_dev); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2525 | } |
| 2526 | |
Sarah Sharp | 5270b95 | 2009-09-04 10:53:11 -0700 | [diff] [blame] | 2527 | static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2528 | struct xhci_container_ctx *in_ctx, |
| 2529 | struct xhci_container_ctx *out_ctx, |
| 2530 | u32 add_flags, u32 drop_flags) |
Sarah Sharp | 5270b95 | 2009-09-04 10:53:11 -0700 | [diff] [blame] | 2531 | { |
| 2532 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2533 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2534 | ctrl_ctx->add_flags = cpu_to_le32(add_flags); |
| 2535 | ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2536 | xhci_slot_copy(xhci, in_ctx, out_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2537 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
Sarah Sharp | 5270b95 | 2009-09-04 10:53:11 -0700 | [diff] [blame] | 2538 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2539 | xhci_dbg(xhci, "Input Context:\n"); |
| 2540 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); |
Sarah Sharp | 5270b95 | 2009-09-04 10:53:11 -0700 | [diff] [blame] | 2541 | } |
| 2542 | |
Dmitry Torokhov | 8212a49 | 2011-02-08 13:55:59 -0800 | [diff] [blame] | 2543 | static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2544 | unsigned int slot_id, unsigned int ep_index, |
| 2545 | struct xhci_dequeue_state *deq_state) |
| 2546 | { |
| 2547 | struct xhci_container_ctx *in_ctx; |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2548 | struct xhci_ep_ctx *ep_ctx; |
| 2549 | u32 added_ctxs; |
| 2550 | dma_addr_t addr; |
| 2551 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2552 | xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, |
| 2553 | xhci->devs[slot_id]->out_ctx, ep_index); |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2554 | in_ctx = xhci->devs[slot_id]->in_ctx; |
| 2555 | ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); |
| 2556 | addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, |
| 2557 | deq_state->new_deq_ptr); |
| 2558 | if (addr == 0) { |
| 2559 | xhci_warn(xhci, "WARN Cannot submit config ep after " |
| 2560 | "reset ep command\n"); |
| 2561 | xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", |
| 2562 | deq_state->new_deq_seg, |
| 2563 | deq_state->new_deq_ptr); |
| 2564 | return; |
| 2565 | } |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2566 | ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2567 | |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2568 | added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2569 | xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, |
| 2570 | xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs); |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2571 | } |
| 2572 | |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2573 | void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2574 | struct usb_device *udev, unsigned int ep_index) |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2575 | { |
| 2576 | struct xhci_dequeue_state deq_state; |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2577 | struct xhci_virt_ep *ep; |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2578 | |
| 2579 | xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2580 | ep = &xhci->devs[udev->slot_id]->eps[ep_index]; |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2581 | /* We need to move the HW's dequeue pointer past this TD, |
| 2582 | * or it will attempt to resend it on the next doorbell ring. |
| 2583 | */ |
| 2584 | xhci_find_new_dequeue_state(xhci, udev->slot_id, |
Sarah Sharp | e9df17e | 2010-04-02 15:34:43 -0700 | [diff] [blame] | 2585 | ep_index, ep->stopped_stream, ep->stopped_td, |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2586 | &deq_state); |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2587 | |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2588 | /* HW with the reset endpoint quirk will use the saved dequeue state to |
| 2589 | * issue a configure endpoint command later. |
| 2590 | */ |
| 2591 | if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { |
| 2592 | xhci_dbg(xhci, "Queueing new dequeue state\n"); |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2593 | xhci_queue_new_dequeue_state(xhci, udev->slot_id, |
Sarah Sharp | e9df17e | 2010-04-02 15:34:43 -0700 | [diff] [blame] | 2594 | ep_index, ep->stopped_stream, &deq_state); |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2595 | } else { |
| 2596 | /* Better hope no one uses the input context between now and the |
| 2597 | * reset endpoint completion! |
Sarah Sharp | e9df17e | 2010-04-02 15:34:43 -0700 | [diff] [blame] | 2598 | * XXX: No idea how this hardware will react when stream rings |
| 2599 | * are enabled. |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2600 | */ |
| 2601 | xhci_dbg(xhci, "Setting up input context for " |
| 2602 | "configure endpoint command\n"); |
| 2603 | xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, |
| 2604 | ep_index, &deq_state); |
| 2605 | } |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2606 | } |
| 2607 | |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 2608 | /* Deal with stalled endpoints. The core should have sent the control message |
| 2609 | * to clear the halt condition. However, we need to make the xHCI hardware |
| 2610 | * reset its sequence number, since a device will expect a sequence number of |
| 2611 | * zero after the halt condition is cleared. |
| 2612 | * Context: in_interrupt |
| 2613 | */ |
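/*
 * For reference, the usual way into this handler (a rough sketch of the
 * core's behaviour, not code from this file): a class driver calls
 * usb_clear_halt(), the core sends CLEAR_FEATURE(ENDPOINT_HALT) to the
 * device and then invokes hcd->driver->endpoint_reset(), which is wired to
 * this function so the xHC's sequence number matches the freshly-cleared
 * device side.
 */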
| 2614 | void xhci_endpoint_reset(struct usb_hcd *hcd, |
| 2615 | struct usb_host_endpoint *ep) |
| 2616 | { |
| 2617 | struct xhci_hcd *xhci; |
| 2618 | struct usb_device *udev; |
| 2619 | unsigned int ep_index; |
| 2620 | unsigned long flags; |
| 2621 | int ret; |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2622 | struct xhci_virt_ep *virt_ep; |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 2623 | |
| 2624 | xhci = hcd_to_xhci(hcd); |
| 2625 | udev = (struct usb_device *) ep->hcpriv; |
| 2626 | /* Called with a root hub endpoint (or an endpoint that wasn't added |
| 2627 | * with xhci_add_endpoint()). |
| 2628 | */ |
| 2629 | if (!ep->hcpriv) |
| 2630 | return; |
| 2631 | ep_index = xhci_get_endpoint_index(&ep->desc); |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2632 | virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; |
| 2633 | if (!virt_ep->stopped_td) { |
Sarah Sharp | c92bcfa | 2009-07-27 12:05:21 -0700 | [diff] [blame] | 2634 | xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", |
| 2635 | ep->desc.bEndpointAddress); |
| 2636 | return; |
| 2637 | } |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2638 | if (usb_endpoint_xfer_control(&ep->desc)) { |
| 2639 | xhci_dbg(xhci, "Control endpoint stall already handled.\n"); |
| 2640 | return; |
| 2641 | } |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 2642 | |
| 2643 | xhci_dbg(xhci, "Queueing reset endpoint command\n"); |
| 2644 | spin_lock_irqsave(&xhci->lock, flags); |
| 2645 | ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); |
Sarah Sharp | c92bcfa | 2009-07-27 12:05:21 -0700 | [diff] [blame] | 2646 | /* |
| 2647 | * Can't change the ring dequeue pointer until it's transitioned to the |
| 2648 | * stopped state, which is only upon a successful reset endpoint |
| 2649 | * command. Better hope that last command worked! |
| 2650 | */ |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 2651 | if (!ret) { |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2652 | xhci_cleanup_stalled_ring(xhci, udev, ep_index); |
| 2653 | kfree(virt_ep->stopped_td); |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 2654 | xhci_ring_cmd_db(xhci); |
| 2655 | } |
Sarah Sharp | 1624ae1 | 2010-05-06 13:40:08 -0700 | [diff] [blame] | 2656 | virt_ep->stopped_td = NULL; |
| 2657 | virt_ep->stopped_trb = NULL; |
Sarah Sharp | 5e5cf6f | 2010-05-06 13:40:18 -0700 | [diff] [blame] | 2658 | virt_ep->stopped_stream = 0; |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 2659 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2660 | |
| 2661 | if (ret) |
| 2662 | xhci_warn(xhci, "FIXME allocate a new ring segment\n"); |
| 2663 | } |
| 2664 | |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2665 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, |
| 2666 | struct usb_device *udev, struct usb_host_endpoint *ep, |
| 2667 | unsigned int slot_id) |
| 2668 | { |
| 2669 | int ret; |
| 2670 | unsigned int ep_index; |
| 2671 | unsigned int ep_state; |
| 2672 | |
| 2673 | if (!ep) |
| 2674 | return -EINVAL; |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 2675 | ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2676 | if (ret <= 0) |
| 2677 | return -EINVAL; |
Alan Stern | 842f169 | 2010-04-30 12:44:46 -0400 | [diff] [blame] | 2678 | if (ep->ss_ep_comp.bmAttributes == 0) { |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2679 | xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" |
| 2680 | " descriptor for ep 0x%x does not support streams\n", |
| 2681 | ep->desc.bEndpointAddress); |
| 2682 | return -EINVAL; |
| 2683 | } |
| 2684 | |
| 2685 | ep_index = xhci_get_endpoint_index(&ep->desc); |
| 2686 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; |
| 2687 | if (ep_state & EP_HAS_STREAMS || |
| 2688 | ep_state & EP_GETTING_STREAMS) { |
| 2689 | xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " |
| 2690 | "already has streams set up.\n", |
| 2691 | ep->desc.bEndpointAddress); |
| 2692 | xhci_warn(xhci, "Send email to xHCI maintainer and ask for " |
| 2693 | "dynamic stream context array reallocation.\n"); |
| 2694 | return -EINVAL; |
| 2695 | } |
| 2696 | if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { |
| 2697 | xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " |
| 2698 | "endpoint 0x%x; URBs are pending.\n", |
| 2699 | ep->desc.bEndpointAddress); |
| 2700 | return -EINVAL; |
| 2701 | } |
| 2702 | return 0; |
| 2703 | } |
| 2704 | |
| 2705 | static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, |
| 2706 | unsigned int *num_streams, unsigned int *num_stream_ctxs) |
| 2707 | { |
| 2708 | unsigned int max_streams; |
| 2709 | |
| 2710 | /* The stream context array size must be a power of two */ |
| 2711 | *num_stream_ctxs = roundup_pow_of_two(*num_streams); |
| 2712 | /* |
| 2713 | * Find out how many primary stream array entries the host controller |
| 2714 | * supports. Later we may use secondary stream arrays (similar to 2nd |
| 2715 | * level page entries), but that's an optional feature for xHCI host |
| 2716 | * controllers. xHCs must support at least 4 stream IDs. |
| 2717 | */ |
| 2718 | max_streams = HCC_MAX_PSA(xhci->hcc_params); |
| 2719 | if (*num_stream_ctxs > max_streams) { |
| 2720 | xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", |
| 2721 | max_streams); |
| 2722 | *num_stream_ctxs = max_streams; |
| 2723 | *num_streams = max_streams; |
| 2724 | } |
| 2725 | } |
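/*
 * Hypothetical example: a driver asking for 6 stream IDs arrives here with
 * *num_streams = 7 (stream 0 included), so *num_stream_ctxs becomes
 * roundup_pow_of_two(7) = 8.  If HCC_MAX_PSA() only advertised 4 entries,
 * both the context array size and the usable stream count would be clamped
 * to 4.
 */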
| 2726 | |
| 2727 | /* Returns an error code if one of the endpoints already has streams. |
| 2728 | * This does not change any data structures, it only checks and gathers |
| 2729 | * information. |
| 2730 | */ |
| 2731 | static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, |
| 2732 | struct usb_device *udev, |
| 2733 | struct usb_host_endpoint **eps, unsigned int num_eps, |
| 2734 | unsigned int *num_streams, u32 *changed_ep_bitmask) |
| 2735 | { |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2736 | unsigned int max_streams; |
| 2737 | unsigned int endpoint_flag; |
| 2738 | int i; |
| 2739 | int ret; |
| 2740 | |
| 2741 | for (i = 0; i < num_eps; i++) { |
| 2742 | ret = xhci_check_streams_endpoint(xhci, udev, |
| 2743 | eps[i], udev->slot_id); |
| 2744 | if (ret < 0) |
| 2745 | return ret; |
| 2746 | |
Alan Stern | 842f169 | 2010-04-30 12:44:46 -0400 | [diff] [blame] | 2747 | max_streams = USB_SS_MAX_STREAMS( |
| 2748 | eps[i]->ss_ep_comp.bmAttributes); |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2749 | if (max_streams < (*num_streams - 1)) { |
| 2750 | xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", |
| 2751 | eps[i]->desc.bEndpointAddress, |
| 2752 | max_streams); |
| 2753 | *num_streams = max_streams+1; |
| 2754 | } |
| 2755 | |
| 2756 | endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); |
| 2757 | if (*changed_ep_bitmask & endpoint_flag) |
| 2758 | return -EINVAL; |
| 2759 | *changed_ep_bitmask |= endpoint_flag; |
| 2760 | } |
| 2761 | return 0; |
| 2762 | } |
| 2763 | |
| 2764 | static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, |
| 2765 | struct usb_device *udev, |
| 2766 | struct usb_host_endpoint **eps, unsigned int num_eps) |
| 2767 | { |
| 2768 | u32 changed_ep_bitmask = 0; |
| 2769 | unsigned int slot_id; |
| 2770 | unsigned int ep_index; |
| 2771 | unsigned int ep_state; |
| 2772 | int i; |
| 2773 | |
| 2774 | slot_id = udev->slot_id; |
| 2775 | if (!xhci->devs[slot_id]) |
| 2776 | return 0; |
| 2777 | |
| 2778 | for (i = 0; i < num_eps; i++) { |
| 2779 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 2780 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; |
| 2781 | /* Are streams already being freed for the endpoint? */ |
| 2782 | if (ep_state & EP_GETTING_NO_STREAMS) { |
| 2783 | xhci_warn(xhci, "WARN Can't disable streams for " |
| 2784 | "endpoint 0x%x\n, " |
| 2785 | "streams are being disabled already.", |
| 2786 | eps[i]->desc.bEndpointAddress); |
| 2787 | return 0; |
| 2788 | } |
| 2789 | /* Are there actually any streams to free? */ |
| 2790 | if (!(ep_state & EP_HAS_STREAMS) && |
| 2791 | !(ep_state & EP_GETTING_STREAMS)) { |
| 2792 | xhci_warn(xhci, "WARN Can't disable streams for " |
| 2793 | "endpoint 0x%x\n, " |
| 2794 | "streams are already disabled!", |
| 2795 | eps[i]->desc.bEndpointAddress); |
| 2796 | xhci_warn(xhci, "WARN xhci_free_streams() called " |
| 2797 | "with non-streams endpoint\n"); |
| 2798 | return 0; |
| 2799 | } |
| 2800 | changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); |
| 2801 | } |
| 2802 | return changed_ep_bitmask; |
| 2803 | } |
| 2804 | |
| 2805 | /* |
| 2806 | * The USB device drivers use this function (through the HCD interface in USB |
| 2807 | * core) to prepare a set of bulk endpoints to use streams. Streams are used to |
| 2808 | * coordinate mass storage command queueing across multiple endpoints (basically |
| 2809 | * a stream ID == a task ID). |
| 2810 | * |
| 2811 | * Setting up streams involves allocating the same size stream context array |
| 2812 | * for each endpoint and issuing a configure endpoint command for all endpoints. |
| 2813 | * |
| 2814 | * Don't allow the call to succeed if one endpoint only supports one stream |
| 2815 | * (which means it doesn't support streams at all). |
| 2816 | * |
| 2817 | * Drivers may get less stream IDs than they asked for, if the host controller |
| 2818 | * hardware or endpoints claim they can't support the number of requested |
| 2819 | * stream IDs. |
| 2820 | */ |
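/*
 * Caller's-eye sketch (hedged; drivers such as uas go through
 * usb_alloc_streams()/usb_free_streams() in the USB core rather than
 * calling this host controller op directly):
 *
 *	streams = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *	if (streams <= 0)
 *		fall back to ordinary one-URB-at-a-time queuing;
 *	...
 *	usb_free_streams(intf, eps, num_eps, GFP_NOIO);
 *
 * The count handed back to the driver already excludes stream 0.
 */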
| 2821 | int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, |
| 2822 | struct usb_host_endpoint **eps, unsigned int num_eps, |
| 2823 | unsigned int num_streams, gfp_t mem_flags) |
| 2824 | { |
| 2825 | int i, ret; |
| 2826 | struct xhci_hcd *xhci; |
| 2827 | struct xhci_virt_device *vdev; |
| 2828 | struct xhci_command *config_cmd; |
| 2829 | unsigned int ep_index; |
| 2830 | unsigned int num_stream_ctxs; |
| 2831 | unsigned long flags; |
| 2832 | u32 changed_ep_bitmask = 0; |
| 2833 | |
| 2834 | if (!eps) |
| 2835 | return -EINVAL; |
| 2836 | |
| 2837 | /* Add one to the number of streams requested to account for |
| 2838 | * stream 0 that is reserved for xHCI usage. |
| 2839 | */ |
| 2840 | num_streams += 1; |
| 2841 | xhci = hcd_to_xhci(hcd); |
| 2842 | xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", |
| 2843 | num_streams); |
| 2844 | |
| 2845 | config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); |
| 2846 | if (!config_cmd) { |
| 2847 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); |
| 2848 | return -ENOMEM; |
| 2849 | } |
| 2850 | |
| 2851 | /* Check to make sure all endpoints are not already configured for |
| 2852 | * streams. While we're at it, find the maximum number of streams that |
| 2853 | * all the endpoints will support and check for duplicate endpoints. |
| 2854 | */ |
| 2855 | spin_lock_irqsave(&xhci->lock, flags); |
| 2856 | ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, |
| 2857 | num_eps, &num_streams, &changed_ep_bitmask); |
| 2858 | if (ret < 0) { |
| 2859 | xhci_free_command(xhci, config_cmd); |
| 2860 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2861 | return ret; |
| 2862 | } |
| 2863 | if (num_streams <= 1) { |
| 2864 | xhci_warn(xhci, "WARN: endpoints can't handle " |
| 2865 | "more than one stream.\n"); |
| 2866 | xhci_free_command(xhci, config_cmd); |
| 2867 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2868 | return -EINVAL; |
| 2869 | } |
| 2870 | vdev = xhci->devs[udev->slot_id]; |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 2871 | /* Mark each endpoint as being in transition, so |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2872 | * xhci_urb_enqueue() will reject all URBs. |
| 2873 | */ |
| 2874 | for (i = 0; i < num_eps; i++) { |
| 2875 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 2876 | vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; |
| 2877 | } |
| 2878 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2879 | |
| 2880 | /* Setup internal data structures and allocate HW data structures for |
| 2881 | * streams (but don't install the HW structures in the input context |
| 2882 | * until we're sure all memory allocation succeeded). |
| 2883 | */ |
| 2884 | xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); |
| 2885 | xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", |
| 2886 | num_stream_ctxs, num_streams); |
| 2887 | |
| 2888 | for (i = 0; i < num_eps; i++) { |
| 2889 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 2890 | vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, |
| 2891 | num_stream_ctxs, |
| 2892 | num_streams, mem_flags); |
| 2893 | if (!vdev->eps[ep_index].stream_info) |
| 2894 | goto cleanup; |
| 2895 | /* Set maxPstreams in endpoint context and update deq ptr to |
| 2896 | * point to stream context array. FIXME |
| 2897 | */ |
| 2898 | } |
| 2899 | |
| 2900 | /* Set up the input context for a configure endpoint command. */ |
| 2901 | for (i = 0; i < num_eps; i++) { |
| 2902 | struct xhci_ep_ctx *ep_ctx; |
| 2903 | |
| 2904 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 2905 | ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); |
| 2906 | |
| 2907 | xhci_endpoint_copy(xhci, config_cmd->in_ctx, |
| 2908 | vdev->out_ctx, ep_index); |
| 2909 | xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, |
| 2910 | vdev->eps[ep_index].stream_info); |
| 2911 | } |
| 2912 | /* Tell the HW to drop its old copy of the endpoint context info |
| 2913 | * and add the updated copy from the input context. |
| 2914 | */ |
| 2915 | xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, |
| 2916 | vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); |
| 2917 | |
| 2918 | /* Issue and wait for the configure endpoint command */ |
| 2919 | ret = xhci_configure_endpoint(xhci, udev, config_cmd, |
| 2920 | false, false); |
| 2921 | |
| 2922 | /* xHC rejected the configure endpoint command for some reason, so we |
| 2923 | * leave the old ring intact and free our internal streams data |
| 2924 | * structure. |
| 2925 | */ |
| 2926 | if (ret < 0) |
| 2927 | goto cleanup; |
| 2928 | |
| 2929 | spin_lock_irqsave(&xhci->lock, flags); |
| 2930 | for (i = 0; i < num_eps; i++) { |
| 2931 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 2932 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; |
| 2933 | xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", |
| 2934 | udev->slot_id, ep_index); |
| 2935 | vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; |
| 2936 | } |
| 2937 | xhci_free_command(xhci, config_cmd); |
| 2938 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2939 | |
| 2940 | /* Subtract 1 for stream 0, which drivers can't use */ |
| 2941 | return num_streams - 1; |
| 2942 | |
| 2943 | cleanup: |
| 2944 | /* If it didn't work, free the streams! */ |
| 2945 | for (i = 0; i < num_eps; i++) { |
| 2946 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 2947 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); |
Sarah Sharp | 8a00774 | 2010-04-30 15:37:56 -0700 | [diff] [blame] | 2948 | vdev->eps[ep_index].stream_info = NULL; |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2949 | /* FIXME Unset maxPstreams in endpoint context and |
| 2950 | * update deq ptr to point to normal stream ring. |
| 2951 | */ |
| 2952 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; |
| 2953 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; |
| 2954 | xhci_endpoint_zero(xhci, vdev, eps[i]); |
| 2955 | } |
| 2956 | xhci_free_command(xhci, config_cmd); |
| 2957 | return -ENOMEM; |
| 2958 | } |
| 2959 | |
| 2960 | /* Transition the endpoint from using streams to being a "normal" endpoint |
| 2961 | * without streams. |
| 2962 | * |
| 2963 | * Modify the endpoint context state, submit a configure endpoint command, |
| 2964 | * and free all endpoint rings for streams if that completes successfully. |
| 2965 | */ |
| 2966 | int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, |
| 2967 | struct usb_host_endpoint **eps, unsigned int num_eps, |
| 2968 | gfp_t mem_flags) |
| 2969 | { |
| 2970 | int i, ret; |
| 2971 | struct xhci_hcd *xhci; |
| 2972 | struct xhci_virt_device *vdev; |
| 2973 | struct xhci_command *command; |
| 2974 | unsigned int ep_index; |
| 2975 | unsigned long flags; |
| 2976 | u32 changed_ep_bitmask; |
| 2977 | |
| 2978 | xhci = hcd_to_xhci(hcd); |
| 2979 | vdev = xhci->devs[udev->slot_id]; |
| 2980 | |
| 2981 | /* Set up a configure endpoint command to remove the streams rings */ |
| 2982 | spin_lock_irqsave(&xhci->lock, flags); |
| 2983 | changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, |
| 2984 | udev, eps, num_eps); |
| 2985 | if (changed_ep_bitmask == 0) { |
| 2986 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2987 | return -EINVAL; |
| 2988 | } |
| 2989 | |
| 2990 | /* Use the xhci_command structure from the first endpoint. We may have |
| 2991 | * allocated too many, but the driver may call xhci_free_streams() for |
| 2992 | * each endpoint it grouped into one call to xhci_alloc_streams(). |
| 2993 | */ |
| 2994 | ep_index = xhci_get_endpoint_index(&eps[0]->desc); |
| 2995 | command = vdev->eps[ep_index].stream_info->free_streams_command; |
| 2996 | for (i = 0; i < num_eps; i++) { |
| 2997 | struct xhci_ep_ctx *ep_ctx; |
| 2998 | |
| 2999 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 3000 | ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); |
| 3001 | xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= |
| 3002 | EP_GETTING_NO_STREAMS; |
| 3003 | |
| 3004 | xhci_endpoint_copy(xhci, command->in_ctx, |
| 3005 | vdev->out_ctx, ep_index); |
| 3006 | xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx, |
| 3007 | &vdev->eps[ep_index]); |
| 3008 | } |
| 3009 | xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, |
| 3010 | vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); |
| 3011 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3012 | |
| 3013 | /* Issue and wait for the configure endpoint command, |
| 3014 | * which must succeed. |
| 3015 | */ |
| 3016 | ret = xhci_configure_endpoint(xhci, udev, command, |
| 3017 | false, true); |
| 3018 | |
| 3019 | /* xHC rejected the configure endpoint command for some reason, so we |
| 3020 | * leave the streams rings intact. |
| 3021 | */ |
| 3022 | if (ret < 0) |
| 3023 | return ret; |
| 3024 | |
| 3025 | spin_lock_irqsave(&xhci->lock, flags); |
| 3026 | for (i = 0; i < num_eps; i++) { |
| 3027 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 3028 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); |
Sarah Sharp | 8a00774 | 2010-04-30 15:37:56 -0700 | [diff] [blame] | 3029 | vdev->eps[ep_index].stream_info = NULL; |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 3030 | /* FIXME Unset maxPstreams in endpoint context and |
| 3031 | * update deq ptr to point to normal stream ring. |
| 3032 | */ |
| 3033 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; |
| 3034 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; |
| 3035 | } |
| 3036 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3037 | |
| 3038 | return 0; |
| 3039 | } |
| 3040 | |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3041 | /* |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3042 | * Deletes endpoint resources for endpoints that were active before a Reset |
| 3043 | * Device command, or a Disable Slot command. The Reset Device command leaves |
| 3044 | * the control endpoint intact, whereas the Disable Slot command deletes it. |
| 3045 | * |
| 3046 | * Must be called with xhci->lock held. |
| 3047 | */ |
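/*
 * Example (hypothetical slot state): if only endpoint indices 1 and 4 still
 * have rings, the loop below yields drop_flags = (1 << 1) | (1 << 4) = 0x12
 * and num_dropped_eps = 2, and xhci->num_active_eps shrinks by two.
 */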
| 3048 | void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, |
| 3049 | struct xhci_virt_device *virt_dev, bool drop_control_ep) |
| 3050 | { |
| 3051 | int i; |
| 3052 | unsigned int num_dropped_eps = 0; |
| 3053 | unsigned int drop_flags = 0; |
| 3054 | |
| 3055 | for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { |
| 3056 | if (virt_dev->eps[i].ring) { |
| 3057 | drop_flags |= 1 << i; |
| 3058 | num_dropped_eps++; |
| 3059 | } |
| 3060 | } |
| 3061 | xhci->num_active_eps -= num_dropped_eps; |
| 3062 | if (num_dropped_eps) |
| 3063 | xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, " |
| 3064 | "%u now active.\n", |
| 3065 | num_dropped_eps, drop_flags, |
| 3066 | xhci->num_active_eps); |
| 3067 | } |
| 3068 | |
| 3069 | /* |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3070 | * This submits a Reset Device Command, which will set the device state to 0, |
| 3071 | * set the device address to 0, and disable all the endpoints except the default |
| 3072 | * control endpoint. The USB core should come back and call |
| 3073 | * xhci_address_device(), and then re-set up the configuration. If this is |
| 3074 | * called because of a usb_reset_and_verify_device(), then the old alternate |
| 3075 | * settings will be re-installed through the normal bandwidth allocation |
| 3076 | * functions. |
| 3077 | * |
| 3078 | * Wait for the Reset Device command to finish. Remove all structures |
| 3079 | * associated with the endpoints that were disabled. Clear the input device |
| 3080 | * structure? Cache the rings? Reset the control endpoint 0 max packet size? |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3081 | * |
| 3082 | * If the virt_dev to be reset does not exist or does not match the udev, |
| 3083 | * it means the device is lost, possibly due to the xHC restore error and |
| 3084 | * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to |
| 3085 | * re-allocate the device. |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3086 | */ |
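/*
 * Typical trigger (a rough sketch of the core's flow, not code in this
 * file): usb_reset_and_verify_device() resets and re-enumerates the device,
 * the core calls the hc_driver's .reset_device hook -- which points here --
 * and the old configuration and alternate settings are then re-installed
 * through the normal bandwidth allocation path described above.
 */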
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3087 | int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3088 | { |
| 3089 | int ret, i; |
| 3090 | unsigned long flags; |
| 3091 | struct xhci_hcd *xhci; |
| 3092 | unsigned int slot_id; |
| 3093 | struct xhci_virt_device *virt_dev; |
| 3094 | struct xhci_command *reset_device_cmd; |
| 3095 | int timeleft; |
| 3096 | int last_freed_endpoint; |
Maarten Lankhorst | 001fd38 | 2011-06-01 23:27:50 +0200 | [diff] [blame] | 3097 | struct xhci_slot_ctx *slot_ctx; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 3098 | int old_active_eps = 0; |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3099 | |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3100 | ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3101 | if (ret <= 0) |
| 3102 | return ret; |
| 3103 | xhci = hcd_to_xhci(hcd); |
| 3104 | slot_id = udev->slot_id; |
| 3105 | virt_dev = xhci->devs[slot_id]; |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3106 | if (!virt_dev) { |
| 3107 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " |
| 3108 | "not exist. Re-allocate the device\n", slot_id); |
| 3109 | ret = xhci_alloc_dev(hcd, udev); |
| 3110 | if (ret == 1) |
| 3111 | return 0; |
| 3112 | else |
| 3113 | return -EINVAL; |
| 3114 | } |
| 3115 | |
| 3116 | if (virt_dev->udev != udev) { |
| 3117 | /* If the virt_dev and the udev do not match, this virt_dev |
| 3118 | * may belong to another udev. |
| 3119 | * Re-allocate the device. |
| 3120 | */ |
| 3121 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " |
| 3122 | "not match the udev. Re-allocate the device\n", |
| 3123 | slot_id); |
| 3124 | ret = xhci_alloc_dev(hcd, udev); |
| 3125 | if (ret == 1) |
| 3126 | return 0; |
| 3127 | else |
| 3128 | return -EINVAL; |
| 3129 | } |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3130 | |
Maarten Lankhorst | 001fd38 | 2011-06-01 23:27:50 +0200 | [diff] [blame] | 3131 | /* If device is not setup, there is no point in resetting it */ |
| 3132 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
| 3133 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == |
| 3134 | SLOT_STATE_DISABLED) |
| 3135 | return 0; |
| 3136 | |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3137 | xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); |
| 3138 | /* Allocate the command structure that holds the struct completion. |
| 3139 | * Assume we're in process context, since the normal device reset |
| 3140 | * process has to wait for the device anyway. Storage devices are |
| 3141 | * reset as part of error handling, so use GFP_NOIO instead of |
| 3142 | * GFP_KERNEL. |
| 3143 | */ |
| 3144 | reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); |
| 3145 | if (!reset_device_cmd) { |
| 3146 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); |
| 3147 | return -ENOMEM; |
| 3148 | } |
| 3149 | |
| 3150 | /* Attempt to submit the Reset Device command to the command ring */ |
| 3151 | spin_lock_irqsave(&xhci->lock, flags); |
| 3152 | reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; |
Paul Zimmerman | 7a3783e | 2010-11-17 16:26:50 -0800 | [diff] [blame] | 3153 | |
| 3154 | /* Enqueue pointer can be left pointing to the link TRB, |
| 3155 | * we must handle that |
| 3156 | */ |
Matt Evans | f5960b6 | 2011-06-01 10:22:55 +1000 | [diff] [blame] | 3157 | if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control)) |
Paul Zimmerman | 7a3783e | 2010-11-17 16:26:50 -0800 | [diff] [blame] | 3158 | reset_device_cmd->command_trb = |
| 3159 | xhci->cmd_ring->enq_seg->next->trbs; |
| 3160 | |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3161 | list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); |
| 3162 | ret = xhci_queue_reset_device(xhci, slot_id); |
| 3163 | if (ret) { |
| 3164 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
| 3165 | list_del(&reset_device_cmd->cmd_list); |
| 3166 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3167 | goto command_cleanup; |
| 3168 | } |
| 3169 | xhci_ring_cmd_db(xhci); |
| 3170 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3171 | |
| 3172 | /* Wait for the Reset Device command to finish */ |
| 3173 | timeleft = wait_for_completion_interruptible_timeout( |
| 3174 | reset_device_cmd->completion, |
| 3175 | USB_CTRL_SET_TIMEOUT); |
| 3176 | if (timeleft <= 0) { |
| 3177 | xhci_warn(xhci, "%s while waiting for reset device command\n", |
| 3178 | timeleft == 0 ? "Timeout" : "Signal"); |
| 3179 | spin_lock_irqsave(&xhci->lock, flags); |
| 3180 | /* The timeout might have raced with the event ring handler, so |
| 3181 | * only delete from the list if the item isn't poisoned. |
| 3182 | */ |
| 3183 | if (reset_device_cmd->cmd_list.next != LIST_POISON1) |
| 3184 | list_del(&reset_device_cmd->cmd_list); |
| 3185 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3186 | ret = -ETIME; |
| 3187 | goto command_cleanup; |
| 3188 | } |
| 3189 | |
| 3190 | /* The Reset Device command can't fail, according to the 0.95/0.96 spec, |
| 3191 | * unless we tried to reset a slot ID that wasn't enabled, |
| 3192 | * or the device wasn't in the addressed or configured state. |
| 3193 | */ |
| 3194 | ret = reset_device_cmd->status; |
| 3195 | switch (ret) { |
| 3196 | case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ |
| 3197 | case COMP_CTX_STATE: /* 0.96 completion code for same thing */ |
| 3198 | xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", |
| 3199 | slot_id, |
| 3200 | xhci_get_slot_state(xhci, virt_dev->out_ctx)); |
| 3201 | xhci_info(xhci, "Not freeing device rings.\n"); |
| 3202 | /* Don't treat this as an error. May change my mind later. */ |
| 3203 | ret = 0; |
| 3204 | goto command_cleanup; |
| 3205 | case COMP_SUCCESS: |
| 3206 | xhci_dbg(xhci, "Successful reset device command.\n"); |
| 3207 | break; |
| 3208 | default: |
| 3209 | if (xhci_is_vendor_info_code(xhci, ret)) |
| 3210 | break; |
| 3211 | xhci_warn(xhci, "Unknown completion code %u for " |
| 3212 | "reset device command.\n", ret); |
| 3213 | ret = -EINVAL; |
| 3214 | goto command_cleanup; |
| 3215 | } |
| 3216 | |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3217 | /* Free up host controller endpoint resources */ |
| 3218 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
| 3219 | spin_lock_irqsave(&xhci->lock, flags); |
| 3220 | /* Don't delete the default control endpoint resources */ |
| 3221 | xhci_free_device_endpoint_resources(xhci, virt_dev, false); |
| 3222 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3223 | } |
| 3224 | |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3225 | /* Everything but endpoint 0 is disabled, so free or cache the rings. */ |
| 3226 | last_freed_endpoint = 1; |
| 3227 | for (i = 1; i < 31; ++i) { |
Dmitry Torokhov | 2dea75d | 2011-04-12 23:06:28 -0700 | [diff] [blame] | 3228 | struct xhci_virt_ep *ep = &virt_dev->eps[i]; |
| 3229 | |
| 3230 | if (ep->ep_state & EP_HAS_STREAMS) { |
| 3231 | xhci_free_stream_info(xhci, ep->stream_info); |
| 3232 | ep->stream_info = NULL; |
| 3233 | ep->ep_state &= ~EP_HAS_STREAMS; |
| 3234 | } |
| 3235 | |
| 3236 | if (ep->ring) { |
| 3237 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); |
| 3238 | last_freed_endpoint = i; |
| 3239 | } |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 3240 | if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) |
| 3241 | xhci_drop_ep_from_interval_table(xhci, |
| 3242 | &virt_dev->eps[i].bw_info, |
| 3243 | virt_dev->bw_table, |
| 3244 | udev, |
| 3245 | &virt_dev->eps[i], |
| 3246 | virt_dev->tt_info); |
Sarah Sharp | 9af5d71 | 2011-09-02 11:05:48 -0700 | [diff] [blame] | 3247 | xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3248 | } |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 3249 | /* If necessary, update the number of active TTs on this root port */ |
| 3250 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); |
| 3251 | |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3252 | xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); |
| 3253 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); |
| 3254 | ret = 0; |
| 3255 | |
| 3256 | command_cleanup: |
| 3257 | xhci_free_command(xhci, reset_device_cmd); |
| 3258 | return ret; |
| 3259 | } |
| 3260 | |
| 3261 | /* |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3262 | * At this point, the struct usb_device is about to go away, the device has |
| 3263 | * disconnected, and all traffic has been stopped and the endpoints have been |
| 3264 | * disabled. Free any HC data structures associated with that device. |
| 3265 | */ |
| 3266 | void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) |
| 3267 | { |
| 3268 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 3269 | struct xhci_virt_device *virt_dev; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3270 | unsigned long flags; |
Sarah Sharp | c526d0d | 2009-09-16 16:42:39 -0700 | [diff] [blame] | 3271 | u32 state; |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 3272 | int i, ret; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3273 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 3274 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
Sarah Sharp | 7bd89b4 | 2011-07-01 13:35:40 -0700 | [diff] [blame] | 3275 | /* If the host is halted due to driver unload, we still need to free the |
| 3276 | * device. |
| 3277 | */ |
| 3278 | if (ret <= 0 && ret != -ENODEV) |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3279 | return; |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 3280 | |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 3281 | virt_dev = xhci->devs[udev->slot_id]; |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 3282 | |
| 3283 | /* Stop any wayward timer functions (which may grab the lock) */ |
| 3284 | for (i = 0; i < 31; ++i) { |
| 3285 | virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; |
| 3286 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); |
| 3287 | } |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3288 | |
| 3289 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | c526d0d | 2009-09-16 16:42:39 -0700 | [diff] [blame] | 3290 | /* Don't disable the slot if the host controller is dead. */ |
| 3291 | state = xhci_readl(xhci, &xhci->op_regs->status); |
Sarah Sharp | 7bd89b4 | 2011-07-01 13:35:40 -0700 | [diff] [blame] | 3292 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || |
| 3293 | (xhci->xhc_state & XHCI_STATE_HALTED)) { |
Sarah Sharp | c526d0d | 2009-09-16 16:42:39 -0700 | [diff] [blame] | 3294 | xhci_free_virt_device(xhci, udev->slot_id); |
| 3295 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3296 | return; |
| 3297 | } |
| 3298 | |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 3299 | if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3300 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3301 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
| 3302 | return; |
| 3303 | } |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 3304 | xhci_ring_cmd_db(xhci); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3305 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3306 | /* |
| 3307 | * Event command completion handler will free any data structures |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 3308 | * associated with the slot. XXX Can free sleep? |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3309 | */ |
| 3310 | } |
| 3311 | |
| 3312 | /* |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3313 | * Checks if we have enough host controller resources for the default control |
| 3314 | * endpoint. |
| 3315 | * |
| 3316 | * Must be called with xhci->lock held. |
| 3317 | */ |
| 3318 | static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) |
| 3319 | { |
| 3320 | if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { |
| 3321 | xhci_dbg(xhci, "Not enough ep ctxs: " |
| 3322 | "%u active, need to add 1, limit is %u.\n", |
| 3323 | xhci->num_active_eps, xhci->limit_active_eps); |
| 3324 | return -ENOMEM; |
| 3325 | } |
| 3326 | xhci->num_active_eps += 1; |
| 3327 | xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n", |
| 3328 | xhci->num_active_eps); |
| 3329 | return 0; |
| 3330 | } |
| 3331 | |
| 3332 | |
| 3333 | /* |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3334 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command |
| 3335 | * timed out, or allocating memory failed. Returns 1 on success. |
| 3336 | */ |
| 3337 | int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) |
| 3338 | { |
| 3339 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 3340 | unsigned long flags; |
| 3341 | int timeleft; |
| 3342 | int ret; |
| 3343 | |
| 3344 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 3345 | ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3346 | if (ret) { |
| 3347 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3348 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
| 3349 | return 0; |
| 3350 | } |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 3351 | xhci_ring_cmd_db(xhci); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3352 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3353 | |
| 3354 | /* XXX: how much time for xHC slot assignment? */ |
| 3355 | timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, |
| 3356 | USB_CTRL_SET_TIMEOUT); |
| 3357 | if (timeleft <= 0) { |
| 3358 | xhci_warn(xhci, "%s while waiting for a slot\n", |
| 3359 | timeleft == 0 ? "Timeout" : "Signal"); |
| 3360 | /* FIXME cancel the enable slot request */ |
| 3361 | return 0; |
| 3362 | } |
| 3363 | |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3364 | if (!xhci->slot_id) { |
| 3365 | xhci_err(xhci, "Error while assigning device slot ID\n"); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3366 | return 0; |
| 3367 | } |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3368 | |
| 3369 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
| 3370 | spin_lock_irqsave(&xhci->lock, flags); |
| 3371 | ret = xhci_reserve_host_control_ep_resources(xhci); |
| 3372 | if (ret) { |
| 3373 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3374 | xhci_warn(xhci, "Not enough host resources, " |
| 3375 | "active endpoint contexts = %u\n", |
| 3376 | xhci->num_active_eps); |
| 3377 | goto disable_slot; |
| 3378 | } |
| 3379 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3380 | } |
| 3381 | /* Use GFP_NOIO, since this function can be called from |
Sarah Sharp | a6d940d | 2010-12-28 13:08:42 -0800 | [diff] [blame] | 3382 | * xhci_discover_or_reset_device(), which may be called as part of |
| 3383 | * mass storage driver error handling. |
| 3384 | */ |
| 3385 | if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3386 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3387 | goto disable_slot; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3388 | } |
| 3389 | udev->slot_id = xhci->slot_id; |
| 3390 | /* Is this a LS or FS device under a HS hub? */ |
| 3391 | /* Hub or peripheral? */ |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3392 | return 1; |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3393 | |
| 3394 | disable_slot: |
| 3395 | /* Disable slot, if we can do it without mem alloc */ |
| 3396 | spin_lock_irqsave(&xhci->lock, flags); |
| 3397 | if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) |
| 3398 | xhci_ring_cmd_db(xhci); |
| 3399 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3400 | return 0; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3401 | } |
| 3402 | |
| 3403 | /* |
| 3404 | * Issue an Address Device command (which will issue a SetAddress request to |
| 3405 | * the device). |
| 3406 | * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so |
| 3407 | * we should only issue and wait on one address command at a time. |
| 3408 | * |
| 3409 | * We add one to the device address issued by the hardware because the USB core |
| 3410 | * uses address 1 for the root hubs (even though they're not really devices). |
| 3411 | */ |
| 3412 | int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) |
| 3413 | { |
| 3414 | unsigned long flags; |
| 3415 | int timeleft; |
| 3416 | struct xhci_virt_device *virt_dev; |
| 3417 | int ret = 0; |
| 3418 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3419 | struct xhci_slot_ctx *slot_ctx; |
| 3420 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | 8e595a5 | 2009-07-27 12:03:31 -0700 | [diff] [blame] | 3421 | u64 temp_64; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3422 | |
| 3423 | if (!udev->slot_id) { |
| 3424 | xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); |
| 3425 | return -EINVAL; |
| 3426 | } |
| 3427 | |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3428 | virt_dev = xhci->devs[udev->slot_id]; |
| 3429 | |
Matt Evans | 7ed603e | 2011-03-29 13:40:56 +1100 | [diff] [blame] | 3430 | if (WARN_ON(!virt_dev)) { |
| 3431 | /* |
| 3432 | * In a plug/unplug torture test with an NEC controller, |
| 3433 | * a NULL pointer dereference was observed once because virt_dev was NULL. |
| 3434 | * Print useful debug rather than crash if it is observed again! |
| 3435 | */ |
| 3436 | xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", |
| 3437 | udev->slot_id); |
| 3438 | return -EINVAL; |
| 3439 | } |
| 3440 | |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3441 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
| 3442 | /* |
| 3443 | * If this is the first Set Address since device plug-in or |
| 3444 | * virt_device reallocation after a resume with an xHCI power loss, |
| 3445 | * then set up the slot context. |
| 3446 | */ |
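| | /* Editor's note: dev_info carries the route string, device speed, |
| | * hub/MTT flags, and context-entry count, so it is still zero only |
| | * if xhci_setup_addressable_virt_dev() has never initialized this |
| | * slot's input context. |
| | */ |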
| 3447 | if (!slot_ctx->dev_info) |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3448 | xhci_setup_addressable_virt_dev(xhci, udev); |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3449 | /* Otherwise, update the control endpoint ring enqueue pointer. */ |
Sarah Sharp | 2d1ee59 | 2010-07-09 17:08:54 +0200 | [diff] [blame] | 3450 | else |
| 3451 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); |
Sarah Sharp | 66e49d8 | 2009-07-27 12:03:46 -0700 | [diff] [blame] | 3452 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3453 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3454 | |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 3455 | spin_lock_irqsave(&xhci->lock, flags); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3456 | ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, |
| 3457 | udev->slot_id); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3458 | if (ret) { |
| 3459 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3460 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
| 3461 | return ret; |
| 3462 | } |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 3463 | xhci_ring_cmd_db(xhci); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3464 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3465 | |
| 3466 | /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ |
| 3467 | timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, |
| 3468 | USB_CTRL_SET_TIMEOUT); |
| 3469 | /* FIXME: From section 4.3.4: "Software shall be responsible for timing |
| 3470 | * the SetAddress() "recovery interval" required by USB and aborting the |
| 3471 | * command on a timeout." |
| 3472 | */ |
| 3473 | if (timeleft <= 0) { |
| 3474 | xhci_warn(xhci, "%s while waiting for address device command\n", |
| 3475 | timeleft == 0 ? "Timeout" : "Signal"); |
| 3476 | /* FIXME cancel the address device command */ |
| 3477 | return -ETIME; |
| 3478 | } |
| 3479 | |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3480 | switch (virt_dev->cmd_status) { |
| 3481 | case COMP_CTX_STATE: |
| 3482 | case COMP_EBADSLT: |
| 3483 | xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", |
| 3484 | udev->slot_id); |
| 3485 | ret = -EINVAL; |
| 3486 | break; |
| 3487 | case COMP_TX_ERR: |
| 3488 | dev_warn(&udev->dev, "Device not responding to set address.\n"); |
| 3489 | ret = -EPROTO; |
| 3490 | break; |
Alex He | f6ba6fe | 2011-06-08 18:34:06 +0800 | [diff] [blame] | 3491 | case COMP_DEV_ERR: |
| 3492 | dev_warn(&udev->dev, "ERROR: Incompatible device for address " |
| 3493 | "device command.\n"); |
| 3494 | ret = -ENODEV; |
| 3495 | break; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3496 | case COMP_SUCCESS: |
| 3497 | xhci_dbg(xhci, "Successful Address Device command\n"); |
| 3498 | break; |
| 3499 | default: |
| 3500 | xhci_err(xhci, "ERROR: unexpected command completion " |
| 3501 | "code 0x%x.\n", virt_dev->cmd_status); |
Sarah Sharp | 66e49d8 | 2009-07-27 12:03:46 -0700 | [diff] [blame] | 3502 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3503 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3504 | ret = -EINVAL; |
| 3505 | break; |
| 3506 | } |
| 3507 | if (ret) { |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3508 | return ret; |
| 3509 | } |
Sarah Sharp | 8e595a5 | 2009-07-27 12:03:31 -0700 | [diff] [blame] | 3510 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
| 3511 | xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); |
| 3512 | xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 3513 | udev->slot_id, |
| 3514 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], |
| 3515 | (unsigned long long) |
| 3516 | le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 3517 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3518 | (unsigned long long)virt_dev->out_ctx->dma); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3519 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3520 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3521 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3522 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3523 | /* |
| 3524 | * USB core uses address 1 for the roothubs, so we add one to the |
| 3525 | * address given back to us by the HC. |
| 3526 | */ |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3527 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
Andiry Xu | c8d4af8 | 2010-10-14 07:22:51 -0700 | [diff] [blame] | 3528 | /* Use kernel assigned address for devices; store xHC assigned |
| 3529 | * address locally. */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 3530 | virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) |
| 3531 | + 1; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 3532 | /* Zero the input context control for later use */ |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3533 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
| 3534 | ctrl_ctx->add_flags = 0; |
| 3535 | ctrl_ctx->drop_flags = 0; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3536 | |
Andiry Xu | c8d4af8 | 2010-10-14 07:22:51 -0700 | [diff] [blame] | 3537 | xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3538 | |
| 3539 | return 0; |
| 3540 | } |
| 3541 | |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 3542 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's |
| 3543 | * internal data structures for the device. |
| 3544 | */ |
| 3545 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, |
| 3546 | struct usb_tt *tt, gfp_t mem_flags) |
| 3547 | { |
| 3548 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 3549 | struct xhci_virt_device *vdev; |
| 3550 | struct xhci_command *config_cmd; |
| 3551 | struct xhci_input_control_ctx *ctrl_ctx; |
| 3552 | struct xhci_slot_ctx *slot_ctx; |
| 3553 | unsigned long flags; |
| 3554 | unsigned think_time; |
| 3555 | int ret; |
| 3556 | |
| 3557 | /* Ignore root hubs */ |
| 3558 | if (!hdev->parent) |
| 3559 | return 0; |
| 3560 | |
| 3561 | vdev = xhci->devs[hdev->slot_id]; |
| 3562 | if (!vdev) { |
| 3563 | xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); |
| 3564 | return -EINVAL; |
| 3565 | } |
Sarah Sharp | a1d78c1 | 2009-12-09 15:59:03 -0800 | [diff] [blame] | 3566 | config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 3567 | if (!config_cmd) { |
| 3568 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); |
| 3569 | return -ENOMEM; |
| 3570 | } |
| 3571 | |
| 3572 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 839c817 | 2011-09-02 11:05:47 -0700 | [diff] [blame] | 3573 | if (hdev->speed == USB_SPEED_HIGH && |
| 3574 | xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { |
| 3575 | xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); |
| 3576 | xhci_free_command(xhci, config_cmd); |
| 3577 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3578 | return -ENOMEM; |
| 3579 | } |
| 3580 | |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 3581 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); |
| 3582 | ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 3583 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 3584 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 3585 | slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 3586 | if (tt->multi) |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 3587 | slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 3588 | if (xhci->hci_version > 0x95) { |
| 3589 | xhci_dbg(xhci, "xHCI version %x needs hub " |
| 3590 | "TT think time and number of ports\n", |
| 3591 | (unsigned int) xhci->hci_version); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 3592 | slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 3593 | /* Set TT think time - convert from ns to FS bit times. |
| 3594 | * 0 = 8 FS bit times, 1 = 16 FS bit times, |
| 3595 | * 2 = 24 FS bit times, 3 = 32 FS bit times. |
Andiry Xu | 700b417 | 2011-05-05 18:14:05 +0800 | [diff] [blame] | 3596 | * |
| 3597 | * xHCI 1.0: this field shall be 0 if the device is not a |
| 3598 | * High-speed hub. |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 3599 | */ |
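| | /* Editor's note: the USB core stores tt->think_time in nanoseconds |
| | * (666 ns per 8 FS bit times, i.e. 666/1332/1998/2664 ns), so the |
| | * divide-by-666 below recovers the 0-3 encoding used here. |
| | */ |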
| 3600 | think_time = tt->think_time; |
| 3601 | if (think_time != 0) |
| 3602 | think_time = (think_time / 666) - 1; |
Andiry Xu | 700b417 | 2011-05-05 18:14:05 +0800 | [diff] [blame] | 3603 | if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) |
| 3604 | slot_ctx->tt_info |= |
| 3605 | cpu_to_le32(TT_THINK_TIME(think_time)); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 3606 | } else { |
| 3607 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " |
| 3608 | "TT think time or number of ports\n", |
| 3609 | (unsigned int) xhci->hci_version); |
| 3610 | } |
| 3611 | slot_ctx->dev_state = 0; |
| 3612 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3613 | |
| 3614 | xhci_dbg(xhci, "Set up %s for hub device.\n", |
| 3615 | (xhci->hci_version > 0x95) ? |
| 3616 | "configure endpoint" : "evaluate context"); |
| 3617 | xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); |
| 3618 | xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); |
| 3619 | |
| 3620 | /* Issue and wait for the configure endpoint or |
| 3621 | * evaluate context command. |
| 3622 | */ |
| 3623 | if (xhci->hci_version > 0x95) |
| 3624 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, |
| 3625 | false, false); |
| 3626 | else |
| 3627 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, |
| 3628 | true, false); |
| 3629 | |
| 3630 | xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); |
| 3631 | xhci_dbg_ctx(xhci, vdev->out_ctx, 0); |
| 3632 | |
| 3633 | xhci_free_command(xhci, config_cmd); |
| 3634 | return ret; |
| 3635 | } |
| 3636 | |
Sarah Sharp | 66d4ead | 2009-04-27 19:52:28 -0700 | [diff] [blame] | 3637 | int xhci_get_frame(struct usb_hcd *hcd) |
| 3638 | { |
| 3639 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 3640 | /* EHCI mods by the periodic size. Why? */ |
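| | /* Editor's note: MFINDEX counts 125-microsecond microframes; shifting |
| | * right by three converts it to 1-ms frame numbers. |
| | */ |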
| 3641 | return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3; |
| 3642 | } |
| 3643 | |
| 3644 | MODULE_DESCRIPTION(DRIVER_DESC); |
| 3645 | MODULE_AUTHOR(DRIVER_AUTHOR); |
| 3646 | MODULE_LICENSE("GPL"); |
| 3647 | |
| 3648 | static int __init xhci_hcd_init(void) |
| 3649 | { |
| 3650 | #ifdef CONFIG_PCI |
| 3651 | int retval = 0; |
| 3652 | |
| 3653 | retval = xhci_register_pci(); |
| 3654 | |
| 3655 | if (retval < 0) { |
| 3656 | printk(KERN_DEBUG "Problem registering PCI driver.\n"); |
| 3657 | return retval; |
| 3658 | } |
| 3659 | #endif |
Sarah Sharp | 9844197 | 2009-05-14 11:44:18 -0700 | [diff] [blame] | 3660 | /* |
| 3661 | * Check the compiler generated sizes of structures that must be laid |
| 3662 | * out in specific ways for hardware access. |
| 3663 | */ |
| 3664 | BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); |
| 3665 | BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); |
| 3666 | BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); |
| 3667 | /* xhci_device_control has eight fields, and also |
| 3668 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx |
| 3669 | */ |
Sarah Sharp | 9844197 | 2009-05-14 11:44:18 -0700 | [diff] [blame] | 3670 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); |
| 3671 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); |
| 3672 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); |
| 3673 | BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); |
| 3674 | BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); |
| 3675 | /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ |
| 3676 | BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); |
| 3677 | BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); |
Sarah Sharp | 66d4ead | 2009-04-27 19:52:28 -0700 | [diff] [blame] | 3678 | return 0; |
| 3679 | } |
| 3680 | module_init(xhci_hcd_init); |
| 3681 | |
| 3682 | static void __exit xhci_hcd_cleanup(void) |
| 3683 | { |
| 3684 | #ifdef CONFIG_PCI |
| 3685 | xhci_unregister_pci(); |
| 3686 | #endif |
| 3687 | } |
| 3688 | module_exit(xhci_hcd_cleanup); |