/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
                u32 mask, u32 done, int usec)
{
        u32 result;

        do {
                result = xhci_readl(xhci, ptr);
                if (result == ~(u32)0)          /* card removed */
                        return -ENODEV;
                result &= mask;
                if (result == done)
                        return 0;
                udelay(1);
                usec--;
        } while (usec > 0);
        return -ETIMEDOUT;
}
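
/*
 * Typical use (this is the exact call made by xhci_halt() below): wait up
 * to XHCI_MAX_HALT_USEC for the controller to report the halted state:
 *
 *      handshake(xhci, &xhci->op_regs->status,
 *                      STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */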
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
        u32 halted;
        u32 cmd;
        u32 mask;

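        /*
         * Always mask off the interrupt enable bits; clear the run/stop
         * bit as well, but only if the controller isn't already halted.
         */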
        mask = ~(XHCI_IRQS);
        halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
        if (!halted)
                mask &= ~CMD_RUN;

        cmd = xhci_readl(xhci, &xhci->op_regs->command);
        cmd &= mask;
        xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
        int ret;
        xhci_dbg(xhci, "// Halt the HC\n");
        xhci_quiesce(xhci);

        ret = handshake(xhci, &xhci->op_regs->status,
                        STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
        if (!ret)
                xhci->xhc_state |= XHCI_STATE_HALTED;
        else
                xhci_warn(xhci, "Host not halted after %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
        u32 temp;
        int ret;

        temp = xhci_readl(xhci, &xhci->op_regs->command);
        temp |= (CMD_RUN);
        xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
                        temp);
        xhci_writel(xhci, temp, &xhci->op_regs->command);

        /*
         * Wait for the HCHalted Status bit to be 0 to indicate the host is
         * running.
         */
        ret = handshake(xhci, &xhci->op_regs->status,
                        STS_HALT, 0, XHCI_MAX_HALT_USEC);
        if (ret == -ETIMEDOUT)
                xhci_err(xhci, "Host took too long to start, "
                                "waited %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        if (!ret)
                xhci->xhc_state &= ~XHCI_STATE_HALTED;
        return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
        u32 command;
        u32 state;
        int ret, i;

        state = xhci_readl(xhci, &xhci->op_regs->status);
        if ((state & STS_HALT) == 0) {
                xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
                return 0;
        }

        xhci_dbg(xhci, "// Reset the HC\n");
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_RESET;
        xhci_writel(xhci, command, &xhci->op_regs->command);

        ret = handshake(xhci, &xhci->op_regs->command,
                        CMD_RESET, 0, 250 * 1000);
        if (ret)
                return ret;

        xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
        /*
         * xHCI cannot write to any doorbells or operational registers other
         * than status until the "Controller Not Ready" flag is cleared.
         */
        ret = handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);

        for (i = 0; i < 2; ++i) {
                xhci->bus_state[i].port_c_suspend = 0;
                xhci->bus_state[i].suspended_ports = 0;
                xhci->bus_state[i].resuming_ports = 0;
        }

        return ret;
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
        int i;

        if (!xhci->msix_entries)
                return -EINVAL;

        for (i = 0; i < xhci->msix_count; i++)
                if (xhci->msix_entries[i].vector)
                        free_irq(xhci->msix_entries[i].vector,
                                        xhci_to_hcd(xhci));
        return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
        int ret;
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        ret = pci_enable_msi(pdev);
        if (ret) {
                xhci_dbg(xhci, "failed to allocate MSI entry\n");
                return ret;
        }

        ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
                                0, "xhci_hcd", xhci_to_hcd(xhci));
        if (ret) {
                xhci_dbg(xhci, "disable MSI interrupt\n");
                pci_disable_msi(pdev);
        }

        return ret;
}

/*
 * Free IRQs
 * Free all the IRQs we have requested.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int ret;

        /* return if using legacy interrupt */
        if (xhci_to_hcd(xhci)->irq > 0)
                return;

        ret = xhci_free_msi(xhci);
        if (!ret)
                return;
        if (pdev->irq > 0)
                free_irq(pdev->irq, xhci_to_hcd(xhci));

        return;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
        int i, ret = 0;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        /*
         * Calculate the number of MSI-X vectors supported.
         * - HCS_MAX_INTRS: the maximum number of interrupts the host can
         *   handle, i.e. the max number of interrupters from HCSPARAMS1.
         * - num_online_cpus: one MSI-X vector per CPU core, plus one
         *   additional vector so an interrupt is always available.
         */
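        /*
         * Illustrative example: with 4 online CPUs and HCSPARAMS1
         * advertising 8 interrupters, this requests min(4 + 1, 8) = 5
         * MSI-X vectors.
         */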
        xhci->msix_count = min(num_online_cpus() + 1,
                                HCS_MAX_INTRS(xhci->hcs_params1));

        xhci->msix_entries =
                kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
                        GFP_KERNEL);
        if (!xhci->msix_entries) {
                xhci_err(xhci, "Failed to allocate MSI-X entries\n");
                return -ENOMEM;
        }

        for (i = 0; i < xhci->msix_count; i++) {
                xhci->msix_entries[i].entry = i;
                xhci->msix_entries[i].vector = 0;
        }

        ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
        if (ret) {
                xhci_dbg(xhci, "Failed to enable MSI-X\n");
                goto free_entries;
        }

        for (i = 0; i < xhci->msix_count; i++) {
                ret = request_irq(xhci->msix_entries[i].vector,
                                (irq_handler_t)xhci_msi_irq,
                                0, "xhci_hcd", xhci_to_hcd(xhci));
                if (ret)
                        goto disable_msix;
        }

        hcd->msix_enabled = 1;
        return ret;

disable_msix:
        xhci_dbg(xhci, "disable MSI-X interrupt\n");
        xhci_free_irq(xhci);
        pci_disable_msix(pdev);
free_entries:
        kfree(xhci->msix_entries);
        xhci->msix_entries = NULL;
        return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        xhci_free_irq(xhci);

        if (xhci->msix_entries) {
                pci_disable_msix(pdev);
                kfree(xhci->msix_entries);
                xhci->msix_entries = NULL;
        } else {
                pci_disable_msi(pdev);
        }

        hcd->msix_enabled = 0;
        return;
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
        int i;

        if (xhci->msix_entries) {
                for (i = 0; i < xhci->msix_count; i++)
                        synchronize_irq(xhci->msix_entries[i].vector);
        }
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int ret;

        /*
         * Some Fresco Logic host controllers advertise MSI, but fail to
         * generate interrupts.  Don't even try to enable MSI.
         */
        if (xhci->quirks & XHCI_BROKEN_MSI)
                return 0;

        /* unregister the legacy interrupt */
        if (hcd->irq)
                free_irq(hcd->irq, hcd);
        hcd->irq = 0;

        ret = xhci_setup_msix(xhci);
        if (ret)
                /* fall back to MSI */
                ret = xhci_setup_msi(xhci);

        if (!ret)
                /* hcd->irq is 0, we have MSI */
                return 0;

        if (!pdev->irq) {
                xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
                return -EINVAL;
        }

        /* fall back to the legacy interrupt */
        ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
                        hcd->irq_descr, hcd);
        if (ret) {
                xhci_err(xhci, "request interrupt %d failed\n",
                                pdev->irq);
                return ret;
        }
        hcd->irq = pdev->irq;
        return 0;
}

#else

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        return 0;
}

static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        int retval = 0;

        xhci_dbg(xhci, "xhci_init\n");
        spin_lock_init(&xhci->lock);
        if (xhci->hci_version == 0x95 && link_quirk) {
                xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
                xhci->quirks |= XHCI_LINK_TRB_QUIRK;
        } else {
                xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
        }
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg(xhci, "Finished xhci_init\n");

        return retval;
}

/*-------------------------------------------------------------------------*/


#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
        unsigned long flags;
        int temp;
        u64 temp_64;
        struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
        int i, j;

        xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

        spin_lock_irqsave(&xhci->lock, flags);
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
        if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
                        (xhci->xhc_state & XHCI_STATE_HALTED)) {
                xhci_dbg(xhci, "HW died, polling stopped.\n");
                spin_unlock_irqrestore(&xhci->lock, flags);
                return;
        }

        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
        xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
        xhci->error_bitmask = 0;
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
        xhci_dbg(xhci, "Command ring:\n");
        xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);
        for (i = 0; i < MAX_HC_SLOTS; ++i) {
                if (!xhci->devs[i])
                        continue;
                for (j = 0; j < 31; ++j) {
                        xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
                }
        }
        spin_unlock_irqrestore(&xhci->lock, flags);

        if (!xhci->zombie)
                mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
        else
                xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
        if (xhci_start(xhci)) {
                xhci_halt(xhci);
                return -ENODEV;
        }
        xhci->shared_hcd->state = HC_STATE_RUNNING;

        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_ring_cmd_db(xhci);

        xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
        return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Set up MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
        u32 temp;
        u64 temp_64;
        int ret;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        /* Start the xHCI host controller running only after the USB 2.0
         * roothub is set up.
         */

        hcd->uses_new_polling = 1;
        if (!usb_hcd_is_primary_hcd(hcd))
                return xhci_run_finished(xhci);

        xhci_dbg(xhci, "xhci_run\n");

        ret = xhci_try_enable_msi(hcd);
        if (ret)
                return ret;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        init_timer(&xhci->event_ring_timer);
        xhci->event_ring_timer.data = (unsigned long) xhci;
        xhci->event_ring_timer.function = xhci_event_ring_work;
        /* Poll the event ring */
        xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
        xhci->zombie = 0;
        xhci_dbg(xhci, "Setting event ring polling timer\n");
        add_timer(&xhci->event_ring_timer);
#endif

        xhci_dbg(xhci, "Command ring memory map follows:\n");
        xhci_debug_ring(xhci, xhci->cmd_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);

        xhci_dbg(xhci, "ERST memory map follows:\n");
        xhci_dbg_erst(xhci, &xhci->erst);
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_ring(xhci, xhci->event_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

        xhci_dbg(xhci, "// Set the interrupt modulation register\n");
        temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
        temp &= ~ER_IRQ_INTERVAL_MASK;
        temp |= (u32) 160;
        xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

        /* Set the HCD state before we enable the irqs */
        temp = xhci_readl(xhci, &xhci->op_regs->command);
        temp |= (CMD_EIE);
        xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
                        temp);
        xhci_writel(xhci, temp, &xhci->op_regs->command);

        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
                        xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
        xhci_writel(xhci, ER_IRQ_ENABLE(temp),
                        &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, 0);

        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_queue_vendor_command(xhci, 0, 0, 0,
                                TRB_TYPE(TRB_NEC_GET_FW));

        xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
        return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);

        /* The shared_hcd is going to be deallocated shortly (the USB core only
         * calls this function when allocation fails in usb_add_hcd(), or
         * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
         */
        xhci->shared_hcd = NULL;
        spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and clean up memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (!usb_hcd_is_primary_hcd(hcd)) {
                xhci_only_stop_hcd(xhci->shared_hcd);
                return;
        }

        spin_lock_irq(&xhci->lock);
        /* Make sure the xHC is halted for a USB3 roothub
         * (xhci_stop() could be called as part of failed init).
         */
        xhci_halt(xhci);
        xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
        /* Tell the event ring poll function not to reschedule */
        xhci->zombie = 1;
        del_timer_sync(&xhci->event_ring_timer);
#endif

        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                usb_amd_dev_put();

        xhci_dbg(xhci, "// Disabling event ring interrupts\n");
        temp = xhci_readl(xhci, &xhci->op_regs->status);
        xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
        temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                        &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, 0);

        xhci_dbg(xhci, "cleaning up memory\n");
        xhci_mem_cleanup(xhci);
        xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                        xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

        xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
                        xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
        xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
        xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
        xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
        xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
        xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
        xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
        xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
        xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
        xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
        xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
        xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
        xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
        xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
        xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
        xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
        u64 val_64;

        /* step 2: initialize command ring buffer */
        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
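        /*
         * Keep the reserved low-order bits of the register, point the
         * rest at the DMA address of the current dequeue TRB, and OR in
         * the consumer cycle state bit.
         */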
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                                xhci->cmd_ring->dequeue) &
                 (u64) ~CMD_RING_RSVD_BITS) |
                xhci->cmd_ring->cycle_state;
        xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
                        (long unsigned long) val_64);
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
        struct xhci_ring *ring;
        struct xhci_segment *seg;

        ring = xhci->cmd_ring;
        seg = ring->deq_seg;
        do {
                memset(seg->trbs, 0,
                        sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
                seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
                        cpu_to_le32(~TRB_CYCLE);
                seg = seg->next;
        } while (seg != ring->deq_seg);

        /* Reset the software enqueue and dequeue pointers */
        ring->deq_seg = ring->first_seg;
        ring->dequeue = ring->first_seg->trbs;
        ring->enq_seg = ring->deq_seg;
        ring->enqueue = ring->dequeue;

        ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
        /*
         * Ring is now zeroed, so the HW should look for change of ownership
         * when the cycle bit is set to 1.
         */
        ring->cycle_state = 1;

        /*
         * Reset the hardware dequeue pointer.
         * Yes, this will need to be re-written after resume, but we're paranoid
         * and want to make sure the hardware doesn't access bogus memory
         * because, say, the BIOS or an SMI started the host without changing
         * the command ring pointers.
         */
        xhci_set_cmd_ring_deq(xhci);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 *
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
        int rc = 0;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        u32 command;

        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
        /* step 1: stop endpoint */
        /* skipped: we assume port suspend has already been done */

        /* step 2: clear Run/Stop bit */
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command &= ~CMD_RUN;
        xhci_writel(xhci, command, &xhci->op_regs->command);
        if (handshake(xhci, &xhci->op_regs->status,
                      STS_HALT, STS_HALT, 100*100)) {
                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        xhci_clear_command_ring(xhci);

        /* step 3: save registers */
        xhci_save_registers(xhci);

        /* step 4: set CSS flag */
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_CSS;
        xhci_writel(xhci, command, &xhci->op_regs->command);
        if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
                xhci_warn(xhci, "WARN: xHC save state timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        spin_unlock_irq(&xhci->lock);

        /* step 5: remove core well power */
        /* synchronize irq when using MSI-X */
        xhci_msix_sync_irqs(xhci);

        return rc;
}

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 *
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
        u32 command, temp = 0;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct usb_hcd *secondary_hcd;
        int retval = 0;

        /* Wait a bit if either of the roothubs needs to settle from the
         * transition into bus suspend.
         */
        if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
                        time_before(jiffies,
                                xhci->bus_state[1].next_statechange))
                msleep(100);

        set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

        spin_lock_irq(&xhci->lock);
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                hibernated = true;

        if (!hibernated) {
                /* step 1: restore register */
                xhci_restore_registers(xhci);
                /* step 2: initialize command ring buffer */
                xhci_set_cmd_ring_deq(xhci);
                /* step 3: restore state and start state */
                /* step 3: set CRS flag */
                command = xhci_readl(xhci, &xhci->op_regs->command);
                command |= CMD_CRS;
                xhci_writel(xhci, command, &xhci->op_regs->command);
                if (handshake(xhci, &xhci->op_regs->status,
                              STS_RESTORE, 0, 10 * 1000)) {
                        xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
                }
                temp = xhci_readl(xhci, &xhci->op_regs->status);
        }

        /* If restore operation fails, re-initialize the HC during resume */
        if ((temp & STS_SRE) || hibernated) {
                /* Let the USB core know _both_ roothubs lost power. */
                usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
                usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

                xhci_dbg(xhci, "Stop HCD\n");
                xhci_halt(xhci);
                xhci_reset(xhci);
                spin_unlock_irq(&xhci->lock);
                xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
                /* Tell the event ring poll function not to reschedule */
                xhci->zombie = 1;
                del_timer_sync(&xhci->event_ring_timer);
#endif

                xhci_dbg(xhci, "// Disabling event ring interrupts\n");
                temp = xhci_readl(xhci, &xhci->op_regs->status);
                xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
                temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
                xhci_writel(xhci, ER_IRQ_DISABLE(temp),
                                &xhci->ir_set->irq_pending);
                xhci_print_ir_set(xhci, 0);

                xhci_dbg(xhci, "cleaning up memory\n");
                xhci_mem_cleanup(xhci);
                xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                                xhci_readl(xhci, &xhci->op_regs->status));

                /* USB core calls the PCI reinit and start functions twice:
                 * first with the primary HCD, and then with the secondary HCD.
                 * If we don't do the same, the host will never be started.
                 */
                if (!usb_hcd_is_primary_hcd(hcd))
                        secondary_hcd = hcd;
                else
                        secondary_hcd = xhci->shared_hcd;

                xhci_dbg(xhci, "Initialize the xhci_hcd\n");
                retval = xhci_init(hcd->primary_hcd);
                if (retval)
                        return retval;
                xhci_dbg(xhci, "Start the primary HCD\n");
                retval = xhci_run(hcd->primary_hcd);
                if (!retval) {
                        xhci_dbg(xhci, "Start the secondary HCD\n");
                        retval = xhci_run(secondary_hcd);
                }
                hcd->state = HC_STATE_SUSPENDED;
                xhci->shared_hcd->state = HC_STATE_SUSPENDED;
                goto done;
        }

        /* step 4: set Run/Stop bit */
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command |= CMD_RUN;
        xhci_writel(xhci, command, &xhci->op_regs->command);
        handshake(xhci, &xhci->op_regs->status, STS_HALT,
                  0, 250 * 1000);

        /* step 5: walk topology and initialize portsc,
         * portpmsc and portli
         */
        /* this is done in bus_resume */

        /* step 6: restart each of the previously
         * running endpoints by ringing their doorbells
         */

        spin_unlock_irq(&xhci->lock);

done:
        if (retval == 0) {
                usb_hcd_resume_root_hub(hcd);
                usb_hcd_resume_root_hub(xhci->shared_hcd);
        }
        return retval;
}
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
        unsigned int index;
        if (usb_endpoint_xfer_control(desc))
                index = (unsigned int) (usb_endpoint_num(desc)*2);
        else
                index = (unsigned int) (usb_endpoint_num(desc)*2) +
                        (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
        return index;
}
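
/*
 * Worked examples of the formula above: ep 1 OUT -> (1 * 2) + 0 - 1 = 1,
 * ep 1 IN -> (1 * 2) + 1 - 1 = 2, and the default control ep 0 -> 0.
 */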
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
        return 1 << (xhci_get_endpoint_index(desc) + 1);
}
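
/* For example, ep 1 IN has endpoint index 2, so its context flag is
 * 1 << 3 = 0b1000 (matching the added_ctxs example further below).
 */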
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
        return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
        return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
                const char *func) {
        struct xhci_hcd *xhci;
        struct xhci_virt_device *virt_dev;

        if (!hcd || (check_ep && !ep) || !udev) {
                printk(KERN_DEBUG "xHCI %s called with invalid args\n",
                                func);
                return -EINVAL;
        }
        if (!udev->parent) {
                printk(KERN_DEBUG "xHCI %s called for root hub\n",
                                func);
                return 0;
        }

        xhci = hcd_to_xhci(hcd);
        if (xhci->xhc_state & XHCI_STATE_HALTED)
                return -ENODEV;

        if (check_virt_dev) {
                if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
                        printk(KERN_DEBUG "xHCI %s called with unaddressed "
                                        "device\n", func);
                        return -EINVAL;
                }

                virt_dev = xhci->devs[udev->slot_id];
                if (virt_dev->udev != udev) {
                        printk(KERN_DEBUG "xHCI %s called with udev and "
                                        "virt_dev that do not match\n", func);
                        return -EINVAL;
                }
        }

        return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
                struct usb_device *udev, struct xhci_command *command,
                bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                unsigned int ep_index, struct urb *urb)
{
        struct xhci_container_ctx *in_ctx;
        struct xhci_container_ctx *out_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        struct xhci_ep_ctx *ep_ctx;
        int max_packet_size;
        int hw_max_packet_size;
        int ret = 0;

        out_ctx = xhci->devs[slot_id]->out_ctx;
        ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
        hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
        max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
        if (hw_max_packet_size != max_packet_size) {
                xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
                xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
                                max_packet_size);
                xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
                                hw_max_packet_size);
                xhci_dbg(xhci, "Issuing evaluate context command.\n");

                /* Set up the modified control endpoint 0 */
                xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
                                xhci->devs[slot_id]->out_ctx, ep_index);
                in_ctx = xhci->devs[slot_id]->in_ctx;
                ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
                ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

                /* Set up the input context flags for the command */
                /* FIXME: This won't work if a non-default control endpoint
                 * changes max packet sizes.
                 */
                ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
                ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
                ctrl_ctx->drop_flags = 0;

                xhci_dbg(xhci, "Slot %d input context\n", slot_id);
                xhci_dbg_ctx(xhci, in_ctx, ep_index);
                xhci_dbg(xhci, "Slot %d output context\n", slot_id);
                xhci_dbg_ctx(xhci, out_ctx, ep_index);

                ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
                                true, false);

                /* Clean up the input context for later use by bandwidth
                 * functions.
                 */
                ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
        }
        return ret;
}
| 1099 | |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1100 | /* |
| 1101 | * non-error returns are a promise to giveback() the urb later |
| 1102 | * we drop ownership so next owner (or urb unlink) can get it |
| 1103 | */ |
| 1104 | int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) |
| 1105 | { |
| 1106 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
Andiry Xu | 2ffdea2 | 2011-09-02 11:05:57 -0700 | [diff] [blame] | 1107 | struct xhci_td *buffer; |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1108 | unsigned long flags; |
| 1109 | int ret = 0; |
| 1110 | unsigned int slot_id, ep_index; |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1111 | struct urb_priv *urb_priv; |
| 1112 | int size, i; |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1113 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1114 | if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, |
| 1115 | true, true, __func__) <= 0) |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1116 | return -EINVAL; |
| 1117 | |
| 1118 | slot_id = urb->dev->slot_id; |
| 1119 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1120 | |
Alan Stern | 541c7d4 | 2010-06-22 16:39:10 -0400 | [diff] [blame] | 1121 | if (!HCD_HW_ACCESSIBLE(hcd)) { |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1122 | if (!in_interrupt()) |
| 1123 | xhci_dbg(xhci, "urb submitted during PCI suspend\n"); |
| 1124 | ret = -ESHUTDOWN; |
| 1125 | goto exit; |
| 1126 | } |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1127 | |
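 | | /* Isoc URBs need one TD per packet; all other URBs need a single TD. */ |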
| 1128 | if (usb_endpoint_xfer_isoc(&urb->ep->desc)) |
| 1129 | size = urb->number_of_packets; |
| 1130 | else |
| 1131 | size = 1; |
| 1132 | |
| 1133 | urb_priv = kzalloc(sizeof(struct urb_priv) + |
| 1134 | size * sizeof(struct xhci_td *), mem_flags); |
| 1135 | if (!urb_priv) |
| 1136 | return -ENOMEM; |
| 1137 | |
Andiry Xu | 2ffdea2 | 2011-09-02 11:05:57 -0700 | [diff] [blame] | 1138 | buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags); |
| 1139 | if (!buffer) { |
| 1140 | kfree(urb_priv); |
| 1141 | return -ENOMEM; |
| 1142 | } |
| 1143 | |
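 | | /* Carve the one contiguous xhci_td allocation into per-TD pointers. */ |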
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1144 | for (i = 0; i < size; i++) { |
Andiry Xu | 2ffdea2 | 2011-09-02 11:05:57 -0700 | [diff] [blame] | 1145 | urb_priv->td[i] = buffer; |
| 1146 | buffer++; |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1147 | } |
| 1148 | |
| 1149 | urb_priv->length = size; |
| 1150 | urb_priv->td_cnt = 0; |
| 1151 | urb->hcpriv = urb_priv; |
| 1152 | |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1153 | if (usb_endpoint_xfer_control(&urb->ep->desc)) { |
| 1154 | /* Check to see if the max packet size for the default control |
| 1155 | * endpoint changed during FS device enumeration |
| 1156 | */ |
| 1157 | if (urb->dev->speed == USB_SPEED_FULL) { |
| 1158 | ret = xhci_check_maxpacket(xhci, slot_id, |
| 1159 | ep_index, urb); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1160 | if (ret < 0) { |
| 1161 | xhci_urb_free_priv(xhci, urb_priv); |
| 1162 | urb->hcpriv = NULL; |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1163 | return ret; |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1164 | } |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1165 | } |
| 1166 | |
Sarah Sharp | b11069f | 2009-07-27 12:03:23 -0700 | [diff] [blame] | 1167 | /* We have a spinlock and interrupts disabled, so we must pass |
| 1168 | * atomic context to this function, which may allocate memory. |
| 1169 | */ |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1170 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1171 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1172 | goto dying; |
Sarah Sharp | b11069f | 2009-07-27 12:03:23 -0700 | [diff] [blame] | 1173 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 1174 | slot_id, ep_index); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1175 | if (ret) |
| 1176 | goto free_priv; |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1177 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 1178 | } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { |
| 1179 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1180 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1181 | goto dying; |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 1182 | if (xhci->devs[slot_id]->eps[ep_index].ep_state & |
| 1183 | EP_GETTING_STREAMS) { |
| 1184 | xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " |
| 1185 | "is transitioning to using streams.\n"); |
| 1186 | ret = -EINVAL; |
| 1187 | } else if (xhci->devs[slot_id]->eps[ep_index].ep_state & |
| 1188 | EP_GETTING_NO_STREAMS) { |
| 1189 | xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " |
| 1190 | "is transitioning to " |
| 1191 | "not having streams.\n"); |
| 1192 | ret = -EINVAL; |
| 1193 | } else { |
| 1194 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, |
| 1195 | slot_id, ep_index); |
| 1196 | } |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1197 | if (ret) |
| 1198 | goto free_priv; |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1199 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | 624defa | 2009-09-02 12:14:28 -0700 | [diff] [blame] | 1200 | } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { |
| 1201 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1202 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1203 | goto dying; |
Sarah Sharp | 624defa | 2009-09-02 12:14:28 -0700 | [diff] [blame] | 1204 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, |
| 1205 | slot_id, ep_index); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1206 | if (ret) |
| 1207 | goto free_priv; |
Sarah Sharp | 624defa | 2009-09-02 12:14:28 -0700 | [diff] [blame] | 1208 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1209 | } else { |
Andiry Xu | 787f4e5 | 2010-07-22 15:23:52 -0700 | [diff] [blame] | 1210 | spin_lock_irqsave(&xhci->lock, flags); |
| 1211 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1212 | goto dying; |
| 1213 | ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, |
| 1214 | slot_id, ep_index); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1215 | if (ret) |
| 1216 | goto free_priv; |
Andiry Xu | 787f4e5 | 2010-07-22 15:23:52 -0700 | [diff] [blame] | 1217 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1218 | } |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1219 | exit: |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1220 | return ret; |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1221 | dying: |
| 1222 | xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " |
| 1223 | "non-responsive xHCI host.\n", |
| 1224 | urb->ep->desc.bEndpointAddress, urb); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1225 | ret = -ESHUTDOWN; |
| 1226 | free_priv: |
| 1227 | xhci_urb_free_priv(xhci, urb_priv); |
| 1228 | urb->hcpriv = NULL; |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1229 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1230 | return ret; |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1231 | } |
| 1232 | |
Sarah Sharp | 021bff9 | 2010-07-29 22:12:20 -0700 | [diff] [blame] | 1233 | /* Get the right ring for the given URB. |
| 1234 | * If the endpoint supports streams, boundary check the URB's stream ID. |
| 1235 | * If the endpoint doesn't support streams, return the singular endpoint ring. |
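 | | * (Stream ID 0 is reserved by the xHCI spec, so an endpoint with |
 | | * num_streams == 4 only accepts URBs with stream IDs 1 through 3.) |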
| 1236 | */ |
| 1237 | static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, |
| 1238 | struct urb *urb) |
| 1239 | { |
| 1240 | unsigned int slot_id; |
| 1241 | unsigned int ep_index; |
| 1242 | unsigned int stream_id; |
| 1243 | struct xhci_virt_ep *ep; |
| 1244 | |
| 1245 | slot_id = urb->dev->slot_id; |
| 1246 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
| 1247 | stream_id = urb->stream_id; |
| 1248 | ep = &xhci->devs[slot_id]->eps[ep_index]; |
| 1249 | /* Common case: no streams */ |
| 1250 | if (!(ep->ep_state & EP_HAS_STREAMS)) |
| 1251 | return ep->ring; |
| 1252 | |
| 1253 | if (stream_id == 0) { |
| 1254 | xhci_warn(xhci, |
| 1255 | "WARN: Slot ID %u, ep index %u has streams, " |
| 1256 | "but URB has no stream ID.\n", |
| 1257 | slot_id, ep_index); |
| 1258 | return NULL; |
| 1259 | } |
| 1260 | |
| 1261 | if (stream_id < ep->stream_info->num_streams) |
| 1262 | return ep->stream_info->stream_rings[stream_id]; |
| 1263 | |
| 1264 | xhci_warn(xhci, |
| 1265 | "WARN: Slot ID %u, ep index %u has " |
| 1266 | "stream IDs 1 to %u allocated, " |
| 1267 | "but stream ID %u is requested.\n", |
| 1268 | slot_id, ep_index, |
| 1269 | ep->stream_info->num_streams - 1, |
| 1270 | stream_id); |
| 1271 | return NULL; |
| 1272 | } |
| 1273 | |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1274 | /* |
| 1275 | * Remove the URB's TD from the endpoint ring. This may cause the HC to stop |
| 1276 | * USB transfers, potentially stopping in the middle of a TRB buffer. The HC |
| 1277 | * should pick up where it left off in the TD, unless a Set Transfer Ring |
| 1278 | * Dequeue Pointer is issued. |
| 1279 | * |
| 1280 | * The TRBs that make up the buffers for the canceled URB will be "removed" from |
| 1281 | * the ring. Since the ring is a contiguous structure, they can't be physically |
 | 1282 | * removed. Instead, there are three cases to handle: |
| 1283 | * |
| 1284 | * 1) If the HC is in the middle of processing the URB to be canceled, we |
| 1285 | * simply move the ring's dequeue pointer past those TRBs using the Set |
| 1286 | * Transfer Ring Dequeue Pointer command. This will be the common case, |
| 1287 | * when drivers timeout on the last submitted URB and attempt to cancel. |
| 1288 | * |
| 1289 | * 2) If the HC is in the middle of a different TD, we turn the TRBs into a |
| 1290 | * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The |
 | 1291 | * HC will need to invalidate any TRBs it has cached after the stop |
| 1292 | * endpoint command, as noted in the xHCI 0.95 errata. |
| 1293 | * |
| 1294 | * 3) The TD may have completed by the time the Stop Endpoint Command |
| 1295 | * completes, so software needs to handle that case too. |
| 1296 | * |
| 1297 | * This function should protect against the TD enqueueing code ringing the |
| 1298 | * doorbell while this code is waiting for a Stop Endpoint command to complete. |
 | 1299 | * It also needs to account for multiple cancellations happening at the same |
| 1300 | * time for the same endpoint. |
| 1301 | * |
| 1302 | * Note that this function can be called in any context, or so says |
| 1303 | * usb_hcd_unlink_urb() |
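 | | * (Typically a class driver times out and calls usb_unlink_urb() or |
 | | * usb_kill_urb(); both reach this routine through usb_hcd_unlink_urb().) |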
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1304 | */ |
| 1305 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) |
| 1306 | { |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1307 | unsigned long flags; |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1308 | int ret, i; |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1309 | u32 temp; |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1310 | struct xhci_hcd *xhci; |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1311 | struct urb_priv *urb_priv; |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1312 | struct xhci_td *td; |
| 1313 | unsigned int ep_index; |
| 1314 | struct xhci_ring *ep_ring; |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 1315 | struct xhci_virt_ep *ep; |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1316 | |
| 1317 | xhci = hcd_to_xhci(hcd); |
| 1318 | spin_lock_irqsave(&xhci->lock, flags); |
| 1319 | /* Make sure the URB hasn't completed or been unlinked already */ |
| 1320 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); |
| 1321 | if (ret || !urb->hcpriv) |
| 1322 | goto done; |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1323 | temp = xhci_readl(xhci, &xhci->op_regs->status); |
Sarah Sharp | c6cc27c | 2011-03-11 10:20:58 -0800 | [diff] [blame] | 1324 | if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1325 | xhci_dbg(xhci, "HW died, freeing TD.\n"); |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1326 | urb_priv = urb->hcpriv; |
Sarah Sharp | 585df1d | 2011-08-02 15:43:40 -0700 | [diff] [blame] | 1327 | for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { |
| 1328 | td = urb_priv->td[i]; |
| 1329 | if (!list_empty(&td->td_list)) |
| 1330 | list_del_init(&td->td_list); |
| 1331 | if (!list_empty(&td->cancelled_td_list)) |
| 1332 | list_del_init(&td->cancelled_td_list); |
| 1333 | } |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1334 | |
| 1335 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
| 1336 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | 214f76f | 2010-10-26 11:22:02 -0700 | [diff] [blame] | 1337 | usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1338 | xhci_urb_free_priv(xhci, urb_priv); |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1339 | return ret; |
| 1340 | } |
Sarah Sharp | 7bd89b4 | 2011-07-01 13:35:40 -0700 | [diff] [blame] | 1341 | if ((xhci->xhc_state & XHCI_STATE_DYING) || |
| 1342 | (xhci->xhc_state & XHCI_STATE_HALTED)) { |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1343 | xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " |
| 1344 | "non-responsive xHCI host.\n", |
| 1345 | urb->ep->desc.bEndpointAddress, urb); |
| 1346 | /* Let the stop endpoint command watchdog timer (which set this |
| 1347 | * state) finish cleaning up the endpoint TD lists. We must |
| 1348 | * have caught it in the middle of dropping a lock and giving |
| 1349 | * back an URB. |
| 1350 | */ |
| 1351 | goto done; |
| 1352 | } |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1353 | |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1354 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 1355 | ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; |
Sarah Sharp | e9df17e | 2010-04-02 15:34:43 -0700 | [diff] [blame] | 1356 | ep_ring = xhci_urb_to_transfer_ring(xhci, urb); |
| 1357 | if (!ep_ring) { |
| 1358 | ret = -EINVAL; |
| 1359 | goto done; |
| 1360 | } |
| 1361 | |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1362 | urb_priv = urb->hcpriv; |
Sarah Sharp | 79688ac | 2011-12-19 16:56:04 -0800 | [diff] [blame] | 1363 | i = urb_priv->td_cnt; |
| 1364 | if (i < urb_priv->length) |
| 1365 | xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, " |
| 1366 | "starting at offset 0x%llx\n", |
| 1367 | urb, urb->dev->devpath, |
| 1368 | urb->ep->desc.bEndpointAddress, |
| 1369 | (unsigned long long) xhci_trb_virt_to_dma( |
| 1370 | urb_priv->td[i]->start_seg, |
| 1371 | urb_priv->td[i]->first_trb)); |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1372 | |
Sarah Sharp | 79688ac | 2011-12-19 16:56:04 -0800 | [diff] [blame] | 1373 | for (; i < urb_priv->length; i++) { |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1374 | td = urb_priv->td[i]; |
| 1375 | list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); |
| 1376 | } |
| 1377 | |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1378 | /* Queue a stop endpoint command, but only if this is |
| 1379 | * the first cancellation to be handled. |
| 1380 | */ |
Sarah Sharp | 678539c | 2009-10-27 10:55:52 -0700 | [diff] [blame] | 1381 | if (!(ep->ep_state & EP_HALT_PENDING)) { |
| 1382 | ep->ep_state |= EP_HALT_PENDING; |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1383 | ep->stop_cmds_pending++; |
| 1384 | ep->stop_cmd_timer.expires = jiffies + |
| 1385 | XHCI_STOP_EP_CMD_TIMEOUT * HZ; |
| 1386 | add_timer(&ep->stop_cmd_timer); |
Andiry Xu | be88fe4 | 2010-10-14 07:22:57 -0700 | [diff] [blame] | 1387 | xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0); |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 1388 | xhci_ring_cmd_db(xhci); |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1389 | } |
| 1390 | done: |
| 1391 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 1392 | return ret; |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1393 | } |
| 1394 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1395 | /* Drop an endpoint from a new bandwidth configuration for this device. |
| 1396 | * Only one call to this function is allowed per endpoint before |
| 1397 | * check_bandwidth() or reset_bandwidth() must be called. |
| 1398 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will |
| 1399 | * add the endpoint to the schedule with possibly new parameters denoted by a |
| 1400 | * different endpoint descriptor in usb_host_endpoint. |
| 1401 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is |
| 1402 | * not allowed. |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 1403 | * |
| 1404 | * The USB core will not allow URBs to be queued to an endpoint that is being |
| 1405 | * disabled, so there's no need for mutual exclusion to protect |
| 1406 | * the xhci->devs[slot_id] structure. |
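 | | * |
 | | * (This is reached through the hc_driver->drop_endpoint hook, typically |
 | | * while the USB core installs a new configuration or alternate setting.) |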
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1407 | */ |
| 1408 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
| 1409 | struct usb_host_endpoint *ep) |
| 1410 | { |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1411 | struct xhci_hcd *xhci; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1412 | struct xhci_container_ctx *in_ctx, *out_ctx; |
| 1413 | struct xhci_input_control_ctx *ctrl_ctx; |
| 1414 | struct xhci_slot_ctx *slot_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1415 | unsigned int last_ctx; |
| 1416 | unsigned int ep_index; |
| 1417 | struct xhci_ep_ctx *ep_ctx; |
| 1418 | u32 drop_flag; |
| 1419 | u32 new_add_flags, new_drop_flags, new_slot_info; |
| 1420 | int ret; |
| 1421 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1422 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1423 | if (ret <= 0) |
| 1424 | return ret; |
| 1425 | xhci = hcd_to_xhci(hcd); |
Sarah Sharp | fe6c6c1 | 2011-05-23 16:41:17 -0700 | [diff] [blame] | 1426 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1427 | return -ENODEV; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1428 | |
Sarah Sharp | fe6c6c1 | 2011-05-23 16:41:17 -0700 | [diff] [blame] | 1429 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1430 | drop_flag = xhci_get_endpoint_flag(&ep->desc); |
| 1431 | if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { |
| 1432 | xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", |
| 1433 | __func__, drop_flag); |
| 1434 | return 0; |
| 1435 | } |
| 1436 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1437 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1438 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; |
| 1439 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1440 | ep_index = xhci_get_endpoint_index(&ep->desc); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1441 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1442 | /* If the HC already knows the endpoint is disabled, |
| 1443 | * or the HCD has noted it is disabled, ignore this request |
| 1444 | */ |
Matt Evans | f5960b6 | 2011-06-01 10:22:55 +1000 | [diff] [blame] | 1445 | if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == |
| 1446 | cpu_to_le32(EP_STATE_DISABLED)) || |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1447 | le32_to_cpu(ctrl_ctx->drop_flags) & |
| 1448 | xhci_get_endpoint_flag(&ep->desc)) { |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 1449 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", |
| 1450 | __func__, ep); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1451 | return 0; |
| 1452 | } |
| 1453 | |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1454 | ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); |
| 1455 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1456 | |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1457 | ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); |
| 1458 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1459 | |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1460 | last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1461 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1462 | /* Update the last valid endpoint context, if we deleted the last one */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1463 | if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) > |
| 1464 | LAST_CTX(last_ctx)) { |
| 1465 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
| 1466 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx)); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1467 | } |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1468 | new_slot_info = le32_to_cpu(slot_ctx->dev_info); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1469 | |
| 1470 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); |
| 1471 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1472 | xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", |
| 1473 | (unsigned int) ep->desc.bEndpointAddress, |
| 1474 | udev->slot_id, |
| 1475 | (unsigned int) new_drop_flags, |
| 1476 | (unsigned int) new_add_flags, |
| 1477 | (unsigned int) new_slot_info); |
| 1478 | return 0; |
| 1479 | } |
| 1480 | |
| 1481 | /* Add an endpoint to a new possible bandwidth configuration for this device. |
| 1482 | * Only one call to this function is allowed per endpoint before |
| 1483 | * check_bandwidth() or reset_bandwidth() must be called. |
| 1484 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will |
| 1485 | * add the endpoint to the schedule with possibly new parameters denoted by a |
| 1486 | * different endpoint descriptor in usb_host_endpoint. |
| 1487 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is |
| 1488 | * not allowed. |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 1489 | * |
| 1490 | * The USB core will not allow URBs to be queued to an endpoint until the |
| 1491 | * configuration or alt setting is installed in the device, so there's no need |
| 1492 | * for mutual exclusion to protect the xhci->devs[slot_id] structure. |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1493 | */ |
| 1494 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
| 1495 | struct usb_host_endpoint *ep) |
| 1496 | { |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1497 | struct xhci_hcd *xhci; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1498 | struct xhci_container_ctx *in_ctx, *out_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1499 | unsigned int ep_index; |
| 1500 | struct xhci_ep_ctx *ep_ctx; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1501 | struct xhci_slot_ctx *slot_ctx; |
| 1502 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1503 | u32 added_ctxs; |
| 1504 | unsigned int last_ctx; |
| 1505 | u32 new_add_flags, new_drop_flags, new_slot_info; |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1506 | struct xhci_virt_device *virt_dev; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1507 | int ret = 0; |
| 1508 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1509 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 1510 | if (ret <= 0) { |
| 1511 | /* So we won't queue a reset ep command for a root hub */ |
| 1512 | ep->hcpriv = NULL; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1513 | return ret; |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 1514 | } |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1515 | xhci = hcd_to_xhci(hcd); |
Sarah Sharp | fe6c6c1 | 2011-05-23 16:41:17 -0700 | [diff] [blame] | 1516 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1517 | return -ENODEV; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1518 | |
| 1519 | added_ctxs = xhci_get_endpoint_flag(&ep->desc); |
| 1520 | last_ctx = xhci_last_valid_endpoint(added_ctxs); |
| 1521 | if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { |
| 1522 | /* FIXME when we have to issue an evaluate endpoint command to |
| 1523 | * deal with ep0 max packet size changing once we get the |
| 1524 | * descriptors |
| 1525 | */ |
| 1526 | xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", |
| 1527 | __func__, added_ctxs); |
| 1528 | return 0; |
| 1529 | } |
| 1530 | |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1531 | virt_dev = xhci->devs[udev->slot_id]; |
| 1532 | in_ctx = virt_dev->in_ctx; |
| 1533 | out_ctx = virt_dev->out_ctx; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1534 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1535 | ep_index = xhci_get_endpoint_index(&ep->desc); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1536 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1537 | |
| 1538 | /* If this endpoint is already in use, and the upper layers are trying |
| 1539 | * to add it again without dropping it, reject the addition. |
| 1540 | */ |
| 1541 | if (virt_dev->eps[ep_index].ring && |
| 1542 | !(le32_to_cpu(ctrl_ctx->drop_flags) & |
| 1543 | xhci_get_endpoint_flag(&ep->desc))) { |
| 1544 | xhci_warn(xhci, "Trying to add endpoint 0x%x " |
| 1545 | "without dropping it.\n", |
| 1546 | (unsigned int) ep->desc.bEndpointAddress); |
| 1547 | return -EINVAL; |
| 1548 | } |
| 1549 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1550 | /* If the HCD has already noted the endpoint is enabled, |
| 1551 | * ignore this request. |
| 1552 | */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1553 | if (le32_to_cpu(ctrl_ctx->add_flags) & |
| 1554 | xhci_get_endpoint_flag(&ep->desc)) { |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 1555 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", |
| 1556 | __func__, ep); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1557 | return 0; |
| 1558 | } |
| 1559 | |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 1560 | /* |
| 1561 | * Configuration and alternate setting changes must be done in |
 | 1562 | * process context, not interrupt context (or so documentation |
| 1563 | * for usb_set_interface() and usb_set_configuration() claim). |
| 1564 | */ |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1565 | if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1566 | dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", |
| 1567 | __func__, ep->desc.bEndpointAddress); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1568 | return -ENOMEM; |
| 1569 | } |
| 1570 | |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1571 | ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); |
| 1572 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1573 | |
| 1574 | /* If xhci_endpoint_disable() was called for this endpoint, but the |
| 1575 | * xHC hasn't been notified yet through the check_bandwidth() call, |
| 1576 | * this re-adds a new state for the endpoint from the new endpoint |
| 1577 | * descriptors. We must drop and re-add this endpoint, so we leave the |
| 1578 | * drop flags alone. |
| 1579 | */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1580 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1581 | |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1582 | slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1583 | /* Update the last valid endpoint context, if we just added one past */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1584 | if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) < |
| 1585 | LAST_CTX(last_ctx)) { |
| 1586 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
| 1587 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx)); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1588 | } |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1589 | new_slot_info = le32_to_cpu(slot_ctx->dev_info); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1590 | |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 1591 | /* Store the usb_device pointer for later use */ |
| 1592 | ep->hcpriv = udev; |
| 1593 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1594 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", |
| 1595 | (unsigned int) ep->desc.bEndpointAddress, |
| 1596 | udev->slot_id, |
| 1597 | (unsigned int) new_drop_flags, |
| 1598 | (unsigned int) new_add_flags, |
| 1599 | (unsigned int) new_slot_info); |
| 1600 | return 0; |
| 1601 | } |
| 1602 | |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1603 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1604 | { |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1605 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1606 | struct xhci_ep_ctx *ep_ctx; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1607 | struct xhci_slot_ctx *slot_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1608 | int i; |
| 1609 | |
| 1610 | /* When a device's add flag and drop flag are zero, any subsequent |
| 1611 | * configure endpoint command will leave that endpoint's state |
| 1612 | * untouched. Make sure we don't leave any old state in the input |
| 1613 | * endpoint contexts. |
| 1614 | */ |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1615 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
| 1616 | ctrl_ctx->drop_flags = 0; |
| 1617 | ctrl_ctx->add_flags = 0; |
| 1618 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1619 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1620 | /* Endpoint 0 is always valid */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1621 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1622 | for (i = 1; i < 31; ++i) { |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1623 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1624 | ep_ctx->ep_info = 0; |
| 1625 | ep_ctx->ep_info2 = 0; |
Sarah Sharp | 8e595a5 | 2009-07-27 12:03:31 -0700 | [diff] [blame] | 1626 | ep_ctx->deq = 0; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1627 | ep_ctx->tx_info = 0; |
| 1628 | } |
| 1629 | } |
| 1630 | |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1631 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, |
Sarah Sharp | 00161f7 | 2011-04-28 12:23:23 -0700 | [diff] [blame] | 1632 | struct usb_device *udev, u32 *cmd_status) |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1633 | { |
| 1634 | int ret; |
| 1635 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1636 | switch (*cmd_status) { |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1637 | case COMP_ENOMEM: |
| 1638 | dev_warn(&udev->dev, "Not enough host controller resources " |
| 1639 | "for new device state.\n"); |
| 1640 | ret = -ENOMEM; |
| 1641 | /* FIXME: can we allocate more resources for the HC? */ |
| 1642 | break; |
| 1643 | case COMP_BW_ERR: |
Hans de Goede | 71d8572 | 2012-01-04 23:29:18 +0100 | [diff] [blame] | 1644 | case COMP_2ND_BW_ERR: |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1645 | dev_warn(&udev->dev, "Not enough bandwidth " |
| 1646 | "for new device state.\n"); |
| 1647 | ret = -ENOSPC; |
| 1648 | /* FIXME: can we go back to the old state? */ |
| 1649 | break; |
| 1650 | case COMP_TRB_ERR: |
| 1651 | /* the HCD set up something wrong */ |
| 1652 | dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " |
| 1653 | "add flag = 1, " |
| 1654 | "and endpoint is not disabled.\n"); |
| 1655 | ret = -EINVAL; |
| 1656 | break; |
Alex He | f6ba6fe | 2011-06-08 18:34:06 +0800 | [diff] [blame] | 1657 | case COMP_DEV_ERR: |
| 1658 | dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint " |
| 1659 | "configure command.\n"); |
| 1660 | ret = -ENODEV; |
| 1661 | break; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1662 | case COMP_SUCCESS: |
| 1663 | dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); |
| 1664 | ret = 0; |
| 1665 | break; |
| 1666 | default: |
| 1667 | xhci_err(xhci, "ERROR: unexpected command completion " |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1668 | "code 0x%x.\n", *cmd_status); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1669 | ret = -EINVAL; |
| 1670 | break; |
| 1671 | } |
| 1672 | return ret; |
| 1673 | } |
| 1674 | |
| 1675 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, |
Sarah Sharp | 00161f7 | 2011-04-28 12:23:23 -0700 | [diff] [blame] | 1676 | struct usb_device *udev, u32 *cmd_status) |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1677 | { |
| 1678 | int ret; |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1679 | struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1680 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1681 | switch (*cmd_status) { |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1682 | case COMP_EINVAL: |
| 1683 | dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate " |
| 1684 | "context command.\n"); |
| 1685 | ret = -EINVAL; |
| 1686 | break; |
| 1687 | case COMP_EBADSLT: |
 | 1688 | dev_warn(&udev->dev, "WARN: slot not enabled for " |
 | 1689 | "evaluate context command.\n"); |
 | | ret = -EINVAL; |
 | | break; |
| 1690 | case COMP_CTX_STATE: |
| 1691 | dev_warn(&udev->dev, "WARN: invalid context state for " |
| 1692 | "evaluate context command.\n"); |
| 1693 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); |
| 1694 | ret = -EINVAL; |
| 1695 | break; |
Alex He | f6ba6fe | 2011-06-08 18:34:06 +0800 | [diff] [blame] | 1696 | case COMP_DEV_ERR: |
| 1697 | dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate " |
| 1698 | "context command.\n"); |
| 1699 | ret = -ENODEV; |
| 1700 | break; |
Alex He | 1bb73a8 | 2011-05-05 18:14:12 +0800 | [diff] [blame] | 1701 | case COMP_MEL_ERR: |
| 1702 | /* Max Exit Latency too large error */ |
| 1703 | dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); |
| 1704 | ret = -EINVAL; |
| 1705 | break; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1706 | case COMP_SUCCESS: |
| 1707 | dev_dbg(&udev->dev, "Successful evaluate context command\n"); |
| 1708 | ret = 0; |
| 1709 | break; |
| 1710 | default: |
| 1711 | xhci_err(xhci, "ERROR: unexpected command completion " |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1712 | "code 0x%x.\n", *cmd_status); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1713 | ret = -EINVAL; |
| 1714 | break; |
| 1715 | } |
| 1716 | return ret; |
| 1717 | } |
| 1718 | |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 1719 | static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, |
| 1720 | struct xhci_container_ctx *in_ctx) |
| 1721 | { |
| 1722 | struct xhci_input_control_ctx *ctrl_ctx; |
| 1723 | u32 valid_add_flags; |
| 1724 | u32 valid_drop_flags; |
| 1725 | |
| 1726 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
| 1727 | /* Ignore the slot flag (bit 0), and the default control endpoint flag |
| 1728 | * (bit 1). The default control endpoint is added during the Address |
| 1729 | * Device command and is never removed until the slot is disabled. |
| 1730 | */ |
 | 1731 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
 | 1732 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; |
| 1733 | |
| 1734 | /* Use hweight32 to count the number of ones in the add flags, or |
| 1735 | * number of endpoints added. Don't count endpoints that are changed |
| 1736 | * (both added and dropped). |
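 | | * For example, valid_add_flags of 0x5 (two endpoints added) with |
 | | * valid_drop_flags of 0x1 (one of those also dropped, i.e. changed) |
 | | * counts as 2 - 1 = 1 new endpoint. |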
| 1737 | */ |
| 1738 | return hweight32(valid_add_flags) - |
| 1739 | hweight32(valid_add_flags & valid_drop_flags); |
| 1740 | } |
| 1741 | |
| 1742 | static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, |
| 1743 | struct xhci_container_ctx *in_ctx) |
| 1744 | { |
| 1745 | struct xhci_input_control_ctx *ctrl_ctx; |
| 1746 | u32 valid_add_flags; |
| 1747 | u32 valid_drop_flags; |
| 1748 | |
| 1749 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
 | 1750 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
 | 1751 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; |
| 1752 | |
| 1753 | return hweight32(valid_drop_flags) - |
| 1754 | hweight32(valid_add_flags & valid_drop_flags); |
| 1755 | } |
| 1756 | |
| 1757 | /* |
| 1758 | * We need to reserve the new number of endpoints before the configure endpoint |
| 1759 | * command completes. We can't subtract the dropped endpoints from the number |
| 1760 | * of active endpoints until the command completes because we can oversubscribe |
| 1761 | * the host in this case: |
| 1762 | * |
| 1763 | * - the first configure endpoint command drops more endpoints than it adds |
| 1764 | * - a second configure endpoint command that adds more endpoints is queued |
| 1765 | * - the first configure endpoint command fails, so the config is unchanged |
 | 1766 | * - the second command may succeed, even though there aren't enough resources |
| 1767 | * |
| 1768 | * Must be called with xhci->lock held. |
| 1769 | */ |
| 1770 | static int xhci_reserve_host_resources(struct xhci_hcd *xhci, |
| 1771 | struct xhci_container_ctx *in_ctx) |
| 1772 | { |
| 1773 | u32 added_eps; |
| 1774 | |
| 1775 | added_eps = xhci_count_num_new_endpoints(xhci, in_ctx); |
| 1776 | if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { |
| 1777 | xhci_dbg(xhci, "Not enough ep ctxs: " |
| 1778 | "%u active, need to add %u, limit is %u.\n", |
| 1779 | xhci->num_active_eps, added_eps, |
| 1780 | xhci->limit_active_eps); |
| 1781 | return -ENOMEM; |
| 1782 | } |
| 1783 | xhci->num_active_eps += added_eps; |
| 1784 | xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps, |
| 1785 | xhci->num_active_eps); |
| 1786 | return 0; |
| 1787 | } |
| 1788 | |
| 1789 | /* |
| 1790 | * The configure endpoint was failed by the xHC for some other reason, so we |
| 1791 | * need to revert the resources that failed configuration would have used. |
| 1792 | * |
| 1793 | * Must be called with xhci->lock held. |
| 1794 | */ |
| 1795 | static void xhci_free_host_resources(struct xhci_hcd *xhci, |
| 1796 | struct xhci_container_ctx *in_ctx) |
| 1797 | { |
| 1798 | u32 num_failed_eps; |
| 1799 | |
| 1800 | num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx); |
| 1801 | xhci->num_active_eps -= num_failed_eps; |
| 1802 | xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n", |
| 1803 | num_failed_eps, |
| 1804 | xhci->num_active_eps); |
| 1805 | } |
| 1806 | |
| 1807 | /* |
| 1808 | * Now that the command has completed, clean up the active endpoint count by |
| 1809 | * subtracting out the endpoints that were dropped (but not changed). |
| 1810 | * |
| 1811 | * Must be called with xhci->lock held. |
| 1812 | */ |
| 1813 | static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, |
| 1814 | struct xhci_container_ctx *in_ctx) |
| 1815 | { |
| 1816 | u32 num_dropped_eps; |
| 1817 | |
| 1818 | num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx); |
| 1819 | xhci->num_active_eps -= num_dropped_eps; |
| 1820 | if (num_dropped_eps) |
| 1821 | xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n", |
| 1822 | num_dropped_eps, |
| 1823 | xhci->num_active_eps); |
| 1824 | } |
| 1825 | |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 1826 | unsigned int xhci_get_block_size(struct usb_device *udev) |
| 1827 | { |
| 1828 | switch (udev->speed) { |
| 1829 | case USB_SPEED_LOW: |
| 1830 | case USB_SPEED_FULL: |
| 1831 | return FS_BLOCK; |
| 1832 | case USB_SPEED_HIGH: |
| 1833 | return HS_BLOCK; |
| 1834 | case USB_SPEED_SUPER: |
| 1835 | return SS_BLOCK; |
| 1836 | case USB_SPEED_UNKNOWN: |
| 1837 | case USB_SPEED_WIRELESS: |
| 1838 | default: |
| 1839 | /* Should never happen */ |
| 1840 | return 1; |
| 1841 | } |
| 1842 | } |
| 1843 | |
| 1844 | unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) |
| 1845 | { |
| 1846 | if (interval_bw->overhead[LS_OVERHEAD_TYPE]) |
| 1847 | return LS_OVERHEAD; |
| 1848 | if (interval_bw->overhead[FS_OVERHEAD_TYPE]) |
| 1849 | return FS_OVERHEAD; |
| 1850 | return HS_OVERHEAD; |
| 1851 | } |
| 1852 | |
| 1853 | /* If we are changing a LS/FS device under a HS hub, |
| 1854 | * make sure (if we are activating a new TT) that the HS bus has enough |
| 1855 | * bandwidth for this new TT. |
| 1856 | */ |
| 1857 | static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, |
| 1858 | struct xhci_virt_device *virt_dev, |
| 1859 | int old_active_eps) |
| 1860 | { |
| 1861 | struct xhci_interval_bw_table *bw_table; |
| 1862 | struct xhci_tt_bw_info *tt_info; |
| 1863 | |
| 1864 | /* Find the bandwidth table for the root port this TT is attached to. */ |
| 1865 | bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; |
| 1866 | tt_info = virt_dev->tt_info; |
| 1867 | /* If this TT already had active endpoints, the bandwidth for this TT |
| 1868 | * has already been added. Removing all periodic endpoints (and thus |
 | 1869 | * making the TT inactive) will only decrease the bandwidth used. |
| 1870 | */ |
| 1871 | if (old_active_eps) |
| 1872 | return 0; |
| 1873 | if (old_active_eps == 0 && tt_info->active_eps != 0) { |
| 1874 | if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) |
| 1875 | return -ENOMEM; |
| 1876 | return 0; |
| 1877 | } |
| 1878 | /* Not sure why we would have no new active endpoints... |
| 1879 | * |
| 1880 | * Maybe because of an Evaluate Context change for a hub update or a |
| 1881 | * control endpoint 0 max packet size change? |
| 1882 | * FIXME: skip the bandwidth calculation in that case. |
| 1883 | */ |
| 1884 | return 0; |
| 1885 | } |
| 1886 | |
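 | | /* SuperSpeed bandwidth is tracked per direction rather than per interval; |
 | |  * a fixed percentage (SS_BW_RESERVED in xhci.h) of each direction's |
 | |  * budget is held back for non-periodic transfers. |
 | |  */ |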
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 1887 | static int xhci_check_ss_bw(struct xhci_hcd *xhci, |
| 1888 | struct xhci_virt_device *virt_dev) |
| 1889 | { |
| 1890 | unsigned int bw_reserved; |
| 1891 | |
| 1892 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); |
| 1893 | if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) |
| 1894 | return -ENOMEM; |
| 1895 | |
| 1896 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); |
| 1897 | if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) |
| 1898 | return -ENOMEM; |
| 1899 | |
| 1900 | return 0; |
| 1901 | } |
| 1902 | |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 1903 | /* |
| 1904 | * This algorithm is a very conservative estimate of the worst-case scheduling |
| 1905 | * scenario for any one interval. The hardware dynamically schedules the |
| 1906 | * packets, so we can't tell which microframe could be the limiting factor in |
| 1907 | * the bandwidth scheduling. This only takes into account periodic endpoints. |
| 1908 | * |
| 1909 | * Obviously, we can't solve an NP complete problem to find the minimum worst |
| 1910 | * case scenario. Instead, we come up with an estimate that is no less than |
| 1911 | * the worst case bandwidth used for any one microframe, but may be an |
| 1912 | * over-estimate. |
| 1913 | * |
| 1914 | * We walk the requirements for each endpoint by interval, starting with the |
| 1915 | * smallest interval, and place packets in the schedule where there is only one |
| 1916 | * possible way to schedule packets for that interval. In order to simplify |
| 1917 | * this algorithm, we record the largest max packet size for each interval, and |
| 1918 | * assume all packets will be that size. |
| 1919 | * |
| 1920 | * For interval 0, we obviously must schedule all packets for each interval. |
| 1921 | * The bandwidth for interval 0 is just the amount of data to be transmitted |
| 1922 | * (the sum of all max ESIT payload sizes, plus any overhead per packet times |
| 1923 | * the number of packets). |
| 1924 | * |
| 1925 | * For interval 1, we have two possible microframes to schedule those packets |
| 1926 | * in. For this algorithm, if we can schedule the same number of packets for |
| 1927 | * each possible scheduling opportunity (each microframe), we will do so. The |
| 1928 | * remaining number of packets will be saved to be transmitted in the gaps in |
| 1929 | * the next interval's scheduling sequence. |
| 1930 | * |
| 1931 | * As we move those remaining packets to be scheduled with interval 2 packets, |
| 1932 | * we have to double the number of remaining packets to transmit. This is |
| 1933 | * because the intervals are actually powers of 2, and we would be transmitting |
| 1934 | * the previous interval's packets twice in this interval. We also have to be |
| 1935 | * sure that when we look at the largest max packet size for this interval, we |
| 1936 | * also look at the largest max packet size for the remaining packets and take |
| 1937 | * the greater of the two. |
| 1938 | * |
| 1939 | * The algorithm continues to evenly distribute packets in each scheduling |
| 1940 | * opportunity, and push the remaining packets out, until we get to the last |
| 1941 | * interval. Then those packets and their associated overhead are just added |
| 1942 | * to the bandwidth used. |
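 | | * |
 | | * A worked example of the carry-over (illustrative numbers): with five |
 | | * interval-1 packets and nothing else, interval 1 schedules 5 >> 2 = 1 |
 | | * packet per opportunity and carries 5 % 4 = 1 packet. That carry doubles |
 | | * at each larger interval while the divisor doubles too, so it is never |
 | | * evenly scheduled and is finally charged once as if it ran in every |
 | | * microframe. |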
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 1943 | */ |
| 1944 | static int xhci_check_bw_table(struct xhci_hcd *xhci, |
| 1945 | struct xhci_virt_device *virt_dev, |
| 1946 | int old_active_eps) |
| 1947 | { |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 1948 | unsigned int bw_reserved; |
| 1949 | unsigned int max_bandwidth; |
| 1950 | unsigned int bw_used; |
| 1951 | unsigned int block_size; |
| 1952 | struct xhci_interval_bw_table *bw_table; |
| 1953 | unsigned int packet_size = 0; |
| 1954 | unsigned int overhead = 0; |
| 1955 | unsigned int packets_transmitted = 0; |
| 1956 | unsigned int packets_remaining = 0; |
| 1957 | unsigned int i; |
| 1958 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 1959 | if (virt_dev->udev->speed == USB_SPEED_SUPER) |
| 1960 | return xhci_check_ss_bw(xhci, virt_dev); |
| 1961 | |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 1962 | if (virt_dev->udev->speed == USB_SPEED_HIGH) { |
| 1963 | max_bandwidth = HS_BW_LIMIT; |
| 1964 | /* Convert percent of bus BW reserved to blocks reserved */ |
| 1965 | bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); |
| 1966 | } else { |
| 1967 | max_bandwidth = FS_BW_LIMIT; |
| 1968 | bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); |
| 1969 | } |
| 1970 | |
| 1971 | bw_table = virt_dev->bw_table; |
| 1972 | /* We need to translate the max packet size and max ESIT payloads into |
| 1973 | * the units the hardware uses. |
| 1974 | */ |
| 1975 | block_size = xhci_get_block_size(virt_dev->udev); |
| 1976 | |
| 1977 | /* If we are manipulating a LS/FS device under a HS hub, double check |
 | 1978 | * that the HS bus has enough bandwidth if we are activating a new TT. |
| 1979 | */ |
| 1980 | if (virt_dev->tt_info) { |
| 1981 | xhci_dbg(xhci, "Recalculating BW for rootport %u\n", |
| 1982 | virt_dev->real_port); |
| 1983 | if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { |
| 1984 | xhci_warn(xhci, "Not enough bandwidth on HS bus for " |
| 1985 | "newly activated TT.\n"); |
| 1986 | return -ENOMEM; |
| 1987 | } |
| 1988 | xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n", |
| 1989 | virt_dev->tt_info->slot_id, |
| 1990 | virt_dev->tt_info->ttport); |
| 1991 | } else { |
| 1992 | xhci_dbg(xhci, "Recalculating BW for rootport %u\n", |
| 1993 | virt_dev->real_port); |
| 1994 | } |
| 1995 | |
| 1996 | /* Add in how much bandwidth will be used for interval zero, or the |
| 1997 | * rounded max ESIT payload + number of packets * largest overhead. |
| 1998 | */ |
| 1999 | bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + |
| 2000 | bw_table->interval_bw[0].num_packets * |
| 2001 | xhci_get_largest_overhead(&bw_table->interval_bw[0]); |
| 2002 | |
| 2003 | for (i = 1; i < XHCI_MAX_INTERVAL; i++) { |
| 2004 | unsigned int bw_added; |
| 2005 | unsigned int largest_mps; |
| 2006 | unsigned int interval_overhead; |
| 2007 | |
| 2008 | /* |
| 2009 | * How many packets could we transmit in this interval? |
| 2010 | * If packets didn't fit in the previous interval, we will need |
| 2011 | * to transmit that many packets twice within this interval. |
| 2012 | */ |
| 2013 | packets_remaining = 2 * packets_remaining + |
| 2014 | bw_table->interval_bw[i].num_packets; |
| 2015 | |
| 2016 | /* Find the largest max packet size of this or the previous |
| 2017 | * interval. |
| 2018 | */ |
| 2019 | if (list_empty(&bw_table->interval_bw[i].endpoints)) |
| 2020 | largest_mps = 0; |
| 2021 | else { |
| 2022 | struct xhci_virt_ep *virt_ep; |
| 2023 | struct list_head *ep_entry; |
| 2024 | |
| 2025 | ep_entry = bw_table->interval_bw[i].endpoints.next; |
| 2026 | virt_ep = list_entry(ep_entry, |
| 2027 | struct xhci_virt_ep, bw_endpoint_list); |
| 2028 | /* Convert to blocks, rounding up */ |
| 2029 | largest_mps = DIV_ROUND_UP( |
| 2030 | virt_ep->bw_info.max_packet_size, |
| 2031 | block_size); |
| 2032 | } |
| 2033 | if (largest_mps > packet_size) |
| 2034 | packet_size = largest_mps; |
| 2035 | |
| 2036 | /* Use the larger overhead of this or the previous interval. */ |
| 2037 | interval_overhead = xhci_get_largest_overhead( |
| 2038 | &bw_table->interval_bw[i]); |
| 2039 | if (interval_overhead > overhead) |
| 2040 | overhead = interval_overhead; |
| 2041 | |
| 2042 | /* How many packets can we evenly distribute across |
| 2043 | * (1 << (i + 1)) possible scheduling opportunities? |
| 2044 | */ |
| 2045 | packets_transmitted = packets_remaining >> (i + 1); |
| 2046 | |
| 2047 | /* Add in the bandwidth used for those scheduled packets */ |
| 2048 | bw_added = packets_transmitted * (overhead + packet_size); |
| 2049 | |
| 2050 | /* How many packets do we have remaining to transmit? */ |
| 2051 | packets_remaining = packets_remaining % (1 << (i + 1)); |
| 2052 | |
| 2053 | /* What largest max packet size should those packets have? */ |
| 2054 | /* If we've transmitted all packets, don't carry over the |
| 2055 | * largest packet size. |
| 2056 | */ |
| 2057 | if (packets_remaining == 0) { |
| 2058 | packet_size = 0; |
| 2059 | overhead = 0; |
| 2060 | } else if (packets_transmitted > 0) { |
| 2061 | /* Otherwise if we do have remaining packets, and we've |
| 2062 | * scheduled some packets in this interval, take the |
| 2063 | * largest max packet size from endpoints with this |
| 2064 | * interval. |
| 2065 | */ |
| 2066 | packet_size = largest_mps; |
| 2067 | overhead = interval_overhead; |
| 2068 | } |
| 2069 | /* Otherwise carry over packet_size and overhead from the last |
| 2070 | * time we had a remainder. |
| 2071 | */ |
| 2072 | bw_used += bw_added; |
| 2073 | if (bw_used > max_bandwidth) { |
| 2074 | xhci_warn(xhci, "Not enough bandwidth. " |
| 2075 | "Proposed: %u, Max: %u\n", |
| 2076 | bw_used, max_bandwidth); |
| 2077 | return -ENOMEM; |
| 2078 | } |
| 2079 | } |
| 2080 | /* |
| 2081 | * Ok, we know we have some packets left over after even-handedly |
| 2082 | * scheduling interval 15. We don't know which microframes they will |
| 2083 | * fit into, so we over-schedule and say they will be scheduled every |
| 2084 | * microframe. |
| 2085 | */ |
| 2086 | if (packets_remaining > 0) |
| 2087 | bw_used += overhead + packet_size; |
| 2088 | |
| 2089 | if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { |
| 2090 | unsigned int port_index = virt_dev->real_port - 1; |
| 2091 | |
| 2092 | /* OK, we're manipulating an HS device attached to a |
| 2093 | * root port bandwidth domain. Include the number of active TTs |
| 2094 | * in the bandwidth used. |
| 2095 | */ |
| 2096 | bw_used += TT_HS_OVERHEAD * |
| 2097 | xhci->rh_bw[port_index].num_active_tts; |
| 2098 | } |
| 2099 | |
| 2100 | xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, " |
| 2101 | "Available: %u " "percent\n", |
| 2102 | bw_used, max_bandwidth, bw_reserved, |
| 2103 | (max_bandwidth - bw_used - bw_reserved) * 100 / |
| 2104 | max_bandwidth); |
| 2105 | |
| 2106 | bw_used += bw_reserved; |
| 2107 | if (bw_used > max_bandwidth) { |
| 2108 | xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", |
| 2109 | bw_used, max_bandwidth); |
| 2110 | return -ENOMEM; |
| 2111 | } |
| 2112 | |
| 2113 | bw_table->bw_used = bw_used; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2114 | return 0; |
| 2115 | } |
| 2116 | |
| 2117 | static bool xhci_is_async_ep(unsigned int ep_type) |
| 2118 | { |
| 2119 | return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && |
| 2120 | ep_type != ISOC_IN_EP && |
| 2121 | ep_type != INT_IN_EP); |
| 2122 | } |
| 2123 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 2124 | static bool xhci_is_sync_in_ep(unsigned int ep_type) |
| 2125 | { |
| 2126 | return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); |
| 2127 | } |
| 2128 | |
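| | /* |
| | * Illustrative sketch of the SuperSpeed accounting below (hypothetical |
| | * endpoint values): SS bandwidth is a per-direction running sum, not an |
| | * interval table. Assuming a 16-byte SS block, an endpoint with |
| | * mult = 1, num_packets = 2, max_packet_size = 1024 (64 SS blocks) and |
| | * ep_interval = 3 is charged roughly |
| | * DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + 64 + SS_OVERHEAD_BURST), 1 << 3) |
| | * blocks per microframe. |
| | */ |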
| 2129 | static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) |
| 2130 | { |
| 2131 | unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); |
| 2132 | |
| 2133 | if (ep_bw->ep_interval == 0) |
| 2134 | return SS_OVERHEAD_BURST + |
| 2135 | (ep_bw->mult * ep_bw->num_packets * |
| 2136 | (SS_OVERHEAD + mps)); |
| 2137 | return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * |
| 2138 | (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), |
| 2139 | 1 << ep_bw->ep_interval); |
| 2141 | } |
| 2142 | |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2143 | void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, |
| 2144 | struct xhci_bw_info *ep_bw, |
| 2145 | struct xhci_interval_bw_table *bw_table, |
| 2146 | struct usb_device *udev, |
| 2147 | struct xhci_virt_ep *virt_ep, |
| 2148 | struct xhci_tt_bw_info *tt_info) |
| 2149 | { |
| 2150 | struct xhci_interval_bw *interval_bw; |
| 2151 | int normalized_interval; |
| 2152 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 2153 | if (xhci_is_async_ep(ep_bw->type)) |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2154 | return; |
| 2155 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 2156 | if (udev->speed == USB_SPEED_SUPER) { |
| 2157 | if (xhci_is_sync_in_ep(ep_bw->type)) |
| 2158 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= |
| 2159 | xhci_get_ss_bw_consumed(ep_bw); |
| 2160 | else |
| 2161 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= |
| 2162 | xhci_get_ss_bw_consumed(ep_bw); |
| 2163 | return; |
| 2164 | } |
| 2165 | |
| 2166 | /* SuperSpeed endpoints never get added to intervals in the table, so |
| 2167 | * this check is only valid for HS/FS/LS devices. |
| 2168 | */ |
| 2169 | if (list_empty(&virt_ep->bw_endpoint_list)) |
| 2170 | return; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2171 | /* For LS/FS devices, we need to translate the interval expressed in |
| 2172 | * microframes to frames. |
| 2173 | */ |
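| | /* For example (hypothetical values): an FS endpoint with ep_interval 5 |
| | * covers 2^5 = 32 microframes = 4 frames, so it is charged to |
| | * interval_bw[5 - 3] = interval_bw[2], i.e. once per 2^2 frames. |
| | */ |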
| 2174 | if (udev->speed == USB_SPEED_HIGH) |
| 2175 | normalized_interval = ep_bw->ep_interval; |
| 2176 | else |
| 2177 | normalized_interval = ep_bw->ep_interval - 3; |
| 2178 | |
| 2179 | if (normalized_interval == 0) |
| 2180 | bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; |
| 2181 | interval_bw = &bw_table->interval_bw[normalized_interval]; |
| 2182 | interval_bw->num_packets -= ep_bw->num_packets; |
| 2183 | switch (udev->speed) { |
| 2184 | case USB_SPEED_LOW: |
| 2185 | interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; |
| 2186 | break; |
| 2187 | case USB_SPEED_FULL: |
| 2188 | interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; |
| 2189 | break; |
| 2190 | case USB_SPEED_HIGH: |
| 2191 | interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; |
| 2192 | break; |
| 2193 | case USB_SPEED_SUPER: |
| 2194 | case USB_SPEED_UNKNOWN: |
| 2195 | case USB_SPEED_WIRELESS: |
| 2196 | /* Should never happen because only LS/FS/HS endpoints will get |
| 2197 | * added to the endpoint list. |
| 2198 | */ |
| 2199 | return; |
| 2200 | } |
| 2201 | if (tt_info) |
| 2202 | tt_info->active_eps -= 1; |
| 2203 | list_del_init(&virt_ep->bw_endpoint_list); |
| 2204 | } |
| 2205 | |
| 2206 | static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, |
| 2207 | struct xhci_bw_info *ep_bw, |
| 2208 | struct xhci_interval_bw_table *bw_table, |
| 2209 | struct usb_device *udev, |
| 2210 | struct xhci_virt_ep *virt_ep, |
| 2211 | struct xhci_tt_bw_info *tt_info) |
| 2212 | { |
| 2213 | struct xhci_interval_bw *interval_bw; |
| 2214 | struct xhci_virt_ep *smaller_ep; |
| 2215 | int normalized_interval; |
| 2216 | |
| 2217 | if (xhci_is_async_ep(ep_bw->type)) |
| 2218 | return; |
| 2219 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 2220 | if (udev->speed == USB_SPEED_SUPER) { |
| 2221 | if (xhci_is_sync_in_ep(ep_bw->type)) |
| 2222 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in += |
| 2223 | xhci_get_ss_bw_consumed(ep_bw); |
| 2224 | else |
| 2225 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out += |
| 2226 | xhci_get_ss_bw_consumed(ep_bw); |
| 2227 | return; |
| 2228 | } |
| 2229 | |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2230 | /* For LS/FS devices, we need to translate the interval expressed in |
| 2231 | * microframes to frames. |
| 2232 | */ |
| 2233 | if (udev->speed == USB_SPEED_HIGH) |
| 2234 | normalized_interval = ep_bw->ep_interval; |
| 2235 | else |
| 2236 | normalized_interval = ep_bw->ep_interval - 3; |
| 2237 | |
| 2238 | if (normalized_interval == 0) |
| 2239 | bw_table->interval0_esit_payload += ep_bw->max_esit_payload; |
| 2240 | interval_bw = &bw_table->interval_bw[normalized_interval]; |
| 2241 | interval_bw->num_packets += ep_bw->num_packets; |
| 2242 | switch (udev->speed) { |
| 2243 | case USB_SPEED_LOW: |
| 2244 | interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; |
| 2245 | break; |
| 2246 | case USB_SPEED_FULL: |
| 2247 | interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; |
| 2248 | break; |
| 2249 | case USB_SPEED_HIGH: |
| 2250 | interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; |
| 2251 | break; |
| 2252 | case USB_SPEED_SUPER: |
| 2253 | case USB_SPEED_UNKNOWN: |
| 2254 | case USB_SPEED_WIRELESS: |
| 2255 | /* Should never happen because only LS/FS/HS endpoints will get |
| 2256 | * added to the endpoint list. |
| 2257 | */ |
| 2258 | return; |
| 2259 | } |
| 2260 | |
| 2261 | if (tt_info) |
| 2262 | tt_info->active_eps += 1; |
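| | /* Note: xhci_check_bw_table() samples only the first endpoint on each |
| | * interval list when it looks for the largest max packet size, so the |
| | * descending sort below is what keeps that check correct. |
| | */ |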
| 2263 | /* Insert the endpoint into the list, largest max packet size first. */ |
| 2264 | list_for_each_entry(smaller_ep, &interval_bw->endpoints, |
| 2265 | bw_endpoint_list) { |
| 2266 | if (ep_bw->max_packet_size >= |
| 2267 | smaller_ep->bw_info.max_packet_size) { |
| 2268 | /* Add the new ep before the smaller endpoint */ |
| 2269 | list_add_tail(&virt_ep->bw_endpoint_list, |
| 2270 | &smaller_ep->bw_endpoint_list); |
| 2271 | return; |
| 2272 | } |
| 2273 | } |
| 2274 | /* Add the new endpoint at the end of the list. */ |
| 2275 | list_add_tail(&virt_ep->bw_endpoint_list, |
| 2276 | &interval_bw->endpoints); |
| 2277 | } |
| 2278 | |
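| | /* |
| | * A transaction translator costs TT_HS_OVERHEAD on the high-speed bus |
| | * only while it has at least one active periodic endpoint, so the |
| | * root-port TT count below changes only on 0 <-> non-zero transitions |
| | * of tt_info->active_eps. |
| | */ |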
| 2279 | void xhci_update_tt_active_eps(struct xhci_hcd *xhci, |
| 2280 | struct xhci_virt_device *virt_dev, |
| 2281 | int old_active_eps) |
| 2282 | { |
| 2283 | struct xhci_root_port_bw_info *rh_bw_info; |
| 2284 | if (!virt_dev->tt_info) |
| 2285 | return; |
| 2286 | |
| 2287 | rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; |
| 2288 | if (old_active_eps == 0 && |
| 2289 | virt_dev->tt_info->active_eps != 0) { |
| 2290 | rh_bw_info->num_active_tts += 1; |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2291 | rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2292 | } else if (old_active_eps != 0 && |
| 2293 | virt_dev->tt_info->active_eps == 0) { |
| 2294 | rh_bw_info->num_active_tts -= 1; |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2295 | rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2296 | } |
| 2297 | } |
| 2298 | |
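| | /* |
| | * Shape of the reservation below: snapshot each endpoint's bw_info, |
| | * drop changed/dropped endpoints from the interval table, install the |
| | * new bw_info, add changed/added endpoints back, then run the table |
| | * check. On failure, every step is undone from the snapshot so the |
| | * table is left exactly as it was found. |
| | */ |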
| 2299 | static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, |
| 2300 | struct xhci_virt_device *virt_dev, |
| 2301 | struct xhci_container_ctx *in_ctx) |
| 2302 | { |
| 2303 | struct xhci_bw_info ep_bw_info[31]; |
| 2304 | int i; |
| 2305 | struct xhci_input_control_ctx *ctrl_ctx; |
| 2306 | int old_active_eps = 0; |
| 2307 | |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2308 | if (virt_dev->tt_info) |
| 2309 | old_active_eps = virt_dev->tt_info->active_eps; |
| 2310 | |
| 2311 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
| 2312 | |
| 2313 | for (i = 0; i < 31; i++) { |
| 2314 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) |
| 2315 | continue; |
| 2316 | |
| 2317 | /* Make a copy of the BW info in case we need to revert this */ |
| 2318 | memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, |
| 2319 | sizeof(ep_bw_info[i])); |
| 2320 | /* Drop the endpoint from the interval table if the endpoint is |
| 2321 | * being dropped or changed. |
| 2322 | */ |
| 2323 | if (EP_IS_DROPPED(ctrl_ctx, i)) |
| 2324 | xhci_drop_ep_from_interval_table(xhci, |
| 2325 | &virt_dev->eps[i].bw_info, |
| 2326 | virt_dev->bw_table, |
| 2327 | virt_dev->udev, |
| 2328 | &virt_dev->eps[i], |
| 2329 | virt_dev->tt_info); |
| 2330 | } |
| 2331 | /* Overwrite the information stored in the endpoints' bw_info */ |
| 2332 | xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); |
| 2333 | for (i = 0; i < 31; i++) { |
| 2334 | /* Add any changed or added endpoints to the interval table */ |
| 2335 | if (EP_IS_ADDED(ctrl_ctx, i)) |
| 2336 | xhci_add_ep_to_interval_table(xhci, |
| 2337 | &virt_dev->eps[i].bw_info, |
| 2338 | virt_dev->bw_table, |
| 2339 | virt_dev->udev, |
| 2340 | &virt_dev->eps[i], |
| 2341 | virt_dev->tt_info); |
| 2342 | } |
| 2343 | |
| 2344 | if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { |
| 2345 | /* Ok, this fits in the bandwidth we have. |
| 2346 | * Update the number of active TTs. |
| 2347 | */ |
| 2348 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); |
| 2349 | return 0; |
| 2350 | } |
| 2351 | |
| 2352 | /* We don't have enough bandwidth for this, revert the stored info. */ |
| 2353 | for (i = 0; i < 31; i++) { |
| 2354 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) |
| 2355 | continue; |
| 2356 | |
| 2357 | /* Drop the new copies of any added or changed endpoints from |
| 2358 | * the interval table. |
| 2359 | */ |
| 2360 | if (EP_IS_ADDED(ctrl_ctx, i)) { |
| 2361 | xhci_drop_ep_from_interval_table(xhci, |
| 2362 | &virt_dev->eps[i].bw_info, |
| 2363 | virt_dev->bw_table, |
| 2364 | virt_dev->udev, |
| 2365 | &virt_dev->eps[i], |
| 2366 | virt_dev->tt_info); |
| 2367 | } |
| 2368 | /* Revert the endpoint back to its old information */ |
| 2369 | memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], |
| 2370 | sizeof(ep_bw_info[i])); |
| 2371 | /* Add any changed or dropped endpoints back into the table */ |
| 2372 | if (EP_IS_DROPPED(ctrl_ctx, i)) |
| 2373 | xhci_add_ep_to_interval_table(xhci, |
| 2374 | &virt_dev->eps[i].bw_info, |
| 2375 | virt_dev->bw_table, |
| 2376 | virt_dev->udev, |
| 2377 | &virt_dev->eps[i], |
| 2378 | virt_dev->tt_info); |
| 2379 | } |
| 2380 | return -ENOMEM; |
| 2381 | } |
| 2382 | |
| 2383 | |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2384 | /* Issue a configure endpoint command or evaluate context command |
| 2385 | * and wait for it to finish. |
| 2386 | */ |
| 2387 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2388 | struct usb_device *udev, |
| 2389 | struct xhci_command *command, |
| 2390 | bool ctx_change, bool must_succeed) |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2391 | { |
| 2392 | int ret; |
| 2393 | int timeleft; |
| 2394 | unsigned long flags; |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2395 | struct xhci_container_ctx *in_ctx; |
| 2396 | struct completion *cmd_completion; |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2397 | u32 *cmd_status; |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2398 | struct xhci_virt_device *virt_dev; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2399 | |
| 2400 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2401 | virt_dev = xhci->devs[udev->slot_id]; |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2402 | |
Sarah Sharp | 750645f | 2011-09-02 11:05:43 -0700 | [diff] [blame] | 2403 | if (command) |
| 2404 | in_ctx = command->in_ctx; |
| 2405 | else |
| 2406 | in_ctx = virt_dev->in_ctx; |
| 2407 | |
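| | /* |
| | * Hosts with XHCI_EP_LIMIT_QUIRK cap the number of active endpoint |
| | * contexts, so reserve the contexts this command would add up front; |
| | * every failure path below releases the reservation again. |
| | */ |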
| 2408 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && |
| 2409 | xhci_reserve_host_resources(xhci, in_ctx)) { |
| 2410 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2411 | xhci_warn(xhci, "Not enough host resources, " |
| 2412 | "active endpoint contexts = %u\n", |
| 2413 | xhci->num_active_eps); |
| 2414 | return -ENOMEM; |
| 2415 | } |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2416 | if ((xhci->quirks & XHCI_SW_BW_CHECKING) && |
| 2417 | xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) { |
| 2418 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
| 2419 | xhci_free_host_resources(xhci, in_ctx); |
| 2420 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2421 | xhci_warn(xhci, "Not enough bandwidth\n"); |
| 2422 | return -ENOMEM; |
| 2423 | } |
Sarah Sharp | 750645f | 2011-09-02 11:05:43 -0700 | [diff] [blame] | 2424 | |
| 2425 | if (command) { |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2426 | cmd_completion = command->completion; |
| 2427 | cmd_status = &command->status; |
| 2428 | command->command_trb = xhci->cmd_ring->enqueue; |
Paul Zimmerman | 7a3783e | 2010-11-17 16:26:50 -0800 | [diff] [blame] | 2429 | |
| 2430 | /* Enqueue pointer can be left pointing to the link TRB, |
| 2431 | * we must handle that |
| 2432 | */ |
Matt Evans | f5960b6 | 2011-06-01 10:22:55 +1000 | [diff] [blame] | 2433 | if (TRB_TYPE_LINK_LE32(command->command_trb->link.control)) |
Paul Zimmerman | 7a3783e | 2010-11-17 16:26:50 -0800 | [diff] [blame] | 2434 | command->command_trb = |
| 2435 | xhci->cmd_ring->enq_seg->next->trbs; |
| 2436 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2437 | list_add_tail(&command->cmd_list, &virt_dev->cmd_list); |
| 2438 | } else { |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2439 | cmd_completion = &virt_dev->cmd_completion; |
| 2440 | cmd_status = &virt_dev->cmd_status; |
| 2441 | } |
Andiry Xu | 1d68064 | 2010-03-12 17:10:04 +0800 | [diff] [blame] | 2442 | init_completion(cmd_completion); |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2443 | |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2444 | if (!ctx_change) |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2445 | ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, |
| 2446 | udev->slot_id, must_succeed); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2447 | else |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2448 | ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, |
Sarah Sharp | 4b26654 | 2012-05-07 15:34:26 -0700 | [diff] [blame] | 2449 | udev->slot_id, must_succeed); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2450 | if (ret < 0) { |
Sarah Sharp | c01591b | 2009-12-09 15:58:58 -0800 | [diff] [blame] | 2451 | if (command) |
| 2452 | list_del(&command->cmd_list); |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2453 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
| 2454 | xhci_free_host_resources(xhci, in_ctx); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2455 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2456 | xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); |
| 2457 | return -ENOMEM; |
| 2458 | } |
| 2459 | xhci_ring_cmd_db(xhci); |
| 2460 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2461 | |
| 2462 | /* Wait for the configure endpoint command to complete */ |
| 2463 | timeleft = wait_for_completion_interruptible_timeout( |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2464 | cmd_completion, |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2465 | USB_CTRL_SET_TIMEOUT); |
| 2466 | if (timeleft <= 0) { |
| 2467 | xhci_warn(xhci, "%s while waiting for %s command\n", |
| 2468 | timeleft == 0 ? "Timeout" : "Signal", |
| 2469 | ctx_change == 0 ? |
| 2470 | "configure endpoint" : |
| 2471 | "evaluate context"); |
| 2472 | /* FIXME cancel the configure endpoint command */ |
| 2473 | return -ETIME; |
| 2474 | } |
| 2475 | |
| 2476 | if (!ctx_change) |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2477 | ret = xhci_configure_endpoint_result(xhci, udev, cmd_status); |
| 2478 | else |
| 2479 | ret = xhci_evaluate_context_result(xhci, udev, cmd_status); |
| 2480 | |
| 2481 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
| 2482 | spin_lock_irqsave(&xhci->lock, flags); |
| 2483 | /* If the command failed, remove the reserved resources. |
| 2484 | * Otherwise, clean up the estimate to include dropped eps. |
| 2485 | */ |
| 2486 | if (ret) |
| 2487 | xhci_free_host_resources(xhci, in_ctx); |
| 2488 | else |
| 2489 | xhci_finish_resource_reservation(xhci, in_ctx); |
| 2490 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2491 | } |
| 2492 | return ret; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2493 | } |
| 2494 | |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 2495 | /* Called after one or more calls to xhci_add_endpoint() or |
| 2496 | * xhci_drop_endpoint(). If this call fails, the USB core is expected |
| 2497 | * to call xhci_reset_bandwidth(). |
| 2498 | * |
| 2499 | * Since we are in the middle of changing either configuration or |
| 2500 | * installing a new alt setting, the USB core won't allow URBs to be |
| 2501 | * enqueued for any endpoint on the old config or interface. Nothing |
| 2502 | * else should be touching the xhci->devs[slot_id] structure, so we |
| 2503 | * don't need to take the xhci->lock for manipulating that. |
| 2504 | */ |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2505 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
| 2506 | { |
| 2507 | int i; |
| 2508 | int ret = 0; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2509 | struct xhci_hcd *xhci; |
| 2510 | struct xhci_virt_device *virt_dev; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2511 | struct xhci_input_control_ctx *ctrl_ctx; |
| 2512 | struct xhci_slot_ctx *slot_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2513 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 2514 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2515 | if (ret <= 0) |
| 2516 | return ret; |
| 2517 | xhci = hcd_to_xhci(hcd); |
Sarah Sharp | fe6c6c1 | 2011-05-23 16:41:17 -0700 | [diff] [blame] | 2518 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 2519 | return -ENODEV; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2520 | |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 2521 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2522 | virt_dev = xhci->devs[udev->slot_id]; |
| 2523 | |
| 2524 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2525 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2526 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
| 2527 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); |
| 2528 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); |
Sarah Sharp | 2dc3753 | 2011-09-02 11:05:40 -0700 | [diff] [blame] | 2529 | |
| 2530 | /* Don't issue the command if there's no endpoints to update. */ |
| 2531 | if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && |
| 2532 | ctrl_ctx->drop_flags == 0) |
| 2533 | return 0; |
| 2534 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2535 | xhci_dbg(xhci, "New Input Control Context:\n"); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2536 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
| 2537 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2538 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2539 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2540 | ret = xhci_configure_endpoint(xhci, udev, NULL, |
| 2541 | false, false); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2542 | if (ret) { |
| 2543 | /* Caller should call reset_bandwidth() */ |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2544 | return ret; |
| 2545 | } |
| 2546 | |
| 2547 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2548 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2549 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2550 | |
Sarah Sharp | 834cb0f | 2011-05-12 18:06:37 -0700 | [diff] [blame] | 2551 | /* Free any rings that were dropped, but not changed. */ |
| 2552 | for (i = 1; i < 31; ++i) { |
Matt Evans | 4819fef | 2011-06-01 13:01:07 +1000 | [diff] [blame] | 2553 | if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && |
| 2554 | !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) |
Sarah Sharp | 834cb0f | 2011-05-12 18:06:37 -0700 | [diff] [blame] | 2555 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); |
| 2556 | } |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2557 | xhci_zero_in_ctx(xhci, virt_dev); |
Sarah Sharp | 834cb0f | 2011-05-12 18:06:37 -0700 | [diff] [blame] | 2558 | /* |
| 2559 | * Install any rings for completely new endpoints or changed endpoints, |
| 2560 | * and free or cache any old rings from changed endpoints. |
| 2561 | */ |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2562 | for (i = 1; i < 31; ++i) { |
Sarah Sharp | 74f9fe2 | 2009-12-03 09:44:29 -0800 | [diff] [blame] | 2563 | if (!virt_dev->eps[i].new_ring) |
| 2564 | continue; |
| 2565 | /* Only cache or free the old ring if it exists. |
| 2566 | * It may not if this is the first add of an endpoint. |
| 2567 | */ |
| 2568 | if (virt_dev->eps[i].ring) |
Sarah Sharp | 412566b | 2009-12-09 15:59:01 -0800 | [diff] [blame] | 2569 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); |
Sarah Sharp | 74f9fe2 | 2009-12-03 09:44:29 -0800 | [diff] [blame] | 2571 | virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; |
| 2572 | virt_dev->eps[i].new_ring = NULL; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2573 | } |
| 2574 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2575 | return ret; |
| 2576 | } |
| 2577 | |
| 2578 | void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
| 2579 | { |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2580 | struct xhci_hcd *xhci; |
| 2581 | struct xhci_virt_device *virt_dev; |
| 2582 | int i, ret; |
| 2583 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 2584 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2585 | if (ret <= 0) |
| 2586 | return; |
| 2587 | xhci = hcd_to_xhci(hcd); |
| 2588 | |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 2589 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2590 | virt_dev = xhci->devs[udev->slot_id]; |
| 2591 | /* Free any rings allocated for added endpoints */ |
| 2592 | for (i = 0; i < 31; ++i) { |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2593 | if (virt_dev->eps[i].new_ring) { |
| 2594 | xhci_ring_free(xhci, virt_dev->eps[i].new_ring); |
| 2595 | virt_dev->eps[i].new_ring = NULL; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2596 | } |
| 2597 | } |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2598 | xhci_zero_in_ctx(xhci, virt_dev); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2599 | } |
| 2600 | |
Sarah Sharp | 5270b95 | 2009-09-04 10:53:11 -0700 | [diff] [blame] | 2601 | static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2602 | struct xhci_container_ctx *in_ctx, |
| 2603 | struct xhci_container_ctx *out_ctx, |
| 2604 | u32 add_flags, u32 drop_flags) |
Sarah Sharp | 5270b95 | 2009-09-04 10:53:11 -0700 | [diff] [blame] | 2605 | { |
| 2606 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2607 | ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2608 | ctrl_ctx->add_flags = cpu_to_le32(add_flags); |
| 2609 | ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2610 | xhci_slot_copy(xhci, in_ctx, out_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2611 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
Sarah Sharp | 5270b95 | 2009-09-04 10:53:11 -0700 | [diff] [blame] | 2612 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2613 | xhci_dbg(xhci, "Input Context:\n"); |
| 2614 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); |
Sarah Sharp | 5270b95 | 2009-09-04 10:53:11 -0700 | [diff] [blame] | 2615 | } |
| 2616 | |
Dmitry Torokhov | 8212a49 | 2011-02-08 13:55:59 -0800 | [diff] [blame] | 2617 | static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2618 | unsigned int slot_id, unsigned int ep_index, |
| 2619 | struct xhci_dequeue_state *deq_state) |
| 2620 | { |
| 2621 | struct xhci_container_ctx *in_ctx; |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2622 | struct xhci_ep_ctx *ep_ctx; |
| 2623 | u32 added_ctxs; |
| 2624 | dma_addr_t addr; |
| 2625 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2626 | xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, |
| 2627 | xhci->devs[slot_id]->out_ctx, ep_index); |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2628 | in_ctx = xhci->devs[slot_id]->in_ctx; |
| 2629 | ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); |
| 2630 | addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, |
| 2631 | deq_state->new_deq_ptr); |
| 2632 | if (addr == 0) { |
| 2633 | xhci_warn(xhci, "WARN Cannot submit config ep after " |
| 2634 | "reset ep command\n"); |
| 2635 | xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", |
| 2636 | deq_state->new_deq_seg, |
| 2637 | deq_state->new_deq_ptr); |
| 2638 | return; |
| 2639 | } |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2640 | ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2641 | |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2642 | added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2643 | xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, |
| 2644 | xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs); |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2645 | } |
| 2646 | |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2647 | void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2648 | struct usb_device *udev, unsigned int ep_index) |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2649 | { |
| 2650 | struct xhci_dequeue_state deq_state; |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2651 | struct xhci_virt_ep *ep; |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2652 | |
| 2653 | xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2654 | ep = &xhci->devs[udev->slot_id]->eps[ep_index]; |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2655 | /* We need to move the HW's dequeue pointer past this TD, |
| 2656 | * or it will attempt to resend it on the next doorbell ring. |
| 2657 | */ |
| 2658 | xhci_find_new_dequeue_state(xhci, udev->slot_id, |
Sarah Sharp | e9df17e | 2010-04-02 15:34:43 -0700 | [diff] [blame] | 2659 | ep_index, ep->stopped_stream, ep->stopped_td, |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2660 | &deq_state); |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2661 | |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2662 | /* HW with the reset endpoint quirk will use the saved dequeue state to |
| 2663 | * issue a configure endpoint command later. |
| 2664 | */ |
| 2665 | if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { |
| 2666 | xhci_dbg(xhci, "Queueing new dequeue state\n"); |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2667 | xhci_queue_new_dequeue_state(xhci, udev->slot_id, |
Sarah Sharp | e9df17e | 2010-04-02 15:34:43 -0700 | [diff] [blame] | 2668 | ep_index, ep->stopped_stream, &deq_state); |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2669 | } else { |
| 2670 | /* Better hope no one uses the input context between now and the |
| 2671 | * reset endpoint completion! |
Sarah Sharp | e9df17e | 2010-04-02 15:34:43 -0700 | [diff] [blame] | 2672 | * XXX: No idea how this hardware will react when stream rings |
| 2673 | * are enabled. |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 2674 | */ |
| 2675 | xhci_dbg(xhci, "Setting up input context for " |
| 2676 | "configure endpoint command\n"); |
| 2677 | xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, |
| 2678 | ep_index, &deq_state); |
| 2679 | } |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2680 | } |
| 2681 | |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 2682 | /* Deal with stalled endpoints. The core should have sent the control message |
| 2683 | * to clear the halt condition. However, we need to make the xHCI hardware |
| 2684 | * reset its sequence number, since a device will expect a sequence number of |
| 2685 | * zero after the halt condition is cleared. |
| 2686 | * Context: in_interrupt |
| 2687 | */ |
| 2688 | void xhci_endpoint_reset(struct usb_hcd *hcd, |
| 2689 | struct usb_host_endpoint *ep) |
| 2690 | { |
| 2691 | struct xhci_hcd *xhci; |
| 2692 | struct usb_device *udev; |
| 2693 | unsigned int ep_index; |
| 2694 | unsigned long flags; |
| 2695 | int ret; |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2696 | struct xhci_virt_ep *virt_ep; |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 2697 | |
| 2698 | xhci = hcd_to_xhci(hcd); |
| 2699 | udev = (struct usb_device *) ep->hcpriv; |
| 2700 | /* Called with a root hub endpoint (or an endpoint that wasn't added |
| 2701 | * with xhci_add_endpoint()). |
| 2702 | */ |
| 2703 | if (!ep->hcpriv) |
| 2704 | return; |
| 2705 | ep_index = xhci_get_endpoint_index(&ep->desc); |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2706 | virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; |
| 2707 | if (!virt_ep->stopped_td) { |
Sarah Sharp | c92bcfa | 2009-07-27 12:05:21 -0700 | [diff] [blame] | 2708 | xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", |
| 2709 | ep->desc.bEndpointAddress); |
| 2710 | return; |
| 2711 | } |
Sarah Sharp | 82d1009 | 2009-08-07 14:04:52 -0700 | [diff] [blame] | 2712 | if (usb_endpoint_xfer_control(&ep->desc)) { |
| 2713 | xhci_dbg(xhci, "Control endpoint stall already handled.\n"); |
| 2714 | return; |
| 2715 | } |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 2716 | |
| 2717 | xhci_dbg(xhci, "Queueing reset endpoint command\n"); |
| 2718 | spin_lock_irqsave(&xhci->lock, flags); |
| 2719 | ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); |
Sarah Sharp | c92bcfa | 2009-07-27 12:05:21 -0700 | [diff] [blame] | 2720 | /* |
| 2721 | * Can't change the ring dequeue pointer until it's transitioned to the |
| 2722 | * stopped state, which is only upon a successful reset endpoint |
| 2723 | * command. Better hope that last command worked! |
| 2724 | */ |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 2725 | if (!ret) { |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 2726 | xhci_cleanup_stalled_ring(xhci, udev, ep_index); |
| 2727 | kfree(virt_ep->stopped_td); |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 2728 | xhci_ring_cmd_db(xhci); |
| 2729 | } |
Sarah Sharp | 1624ae1 | 2010-05-06 13:40:08 -0700 | [diff] [blame] | 2730 | virt_ep->stopped_td = NULL; |
| 2731 | virt_ep->stopped_trb = NULL; |
Sarah Sharp | 5e5cf6f | 2010-05-06 13:40:18 -0700 | [diff] [blame] | 2732 | virt_ep->stopped_stream = 0; |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 2733 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2734 | |
| 2735 | if (ret) |
| 2736 | xhci_warn(xhci, "FIXME allocate a new ring segment\n"); |
| 2737 | } |
| 2738 | |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2739 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, |
| 2740 | struct usb_device *udev, struct usb_host_endpoint *ep, |
| 2741 | unsigned int slot_id) |
| 2742 | { |
| 2743 | int ret; |
| 2744 | unsigned int ep_index; |
| 2745 | unsigned int ep_state; |
| 2746 | |
| 2747 | if (!ep) |
| 2748 | return -EINVAL; |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 2749 | ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2750 | if (ret <= 0) |
| 2751 | return -EINVAL; |
Alan Stern | 842f169 | 2010-04-30 12:44:46 -0400 | [diff] [blame] | 2752 | if (ep->ss_ep_comp.bmAttributes == 0) { |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2753 | xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" |
| 2754 | " descriptor for ep 0x%x does not support streams\n", |
| 2755 | ep->desc.bEndpointAddress); |
| 2756 | return -EINVAL; |
| 2757 | } |
| 2758 | |
| 2759 | ep_index = xhci_get_endpoint_index(&ep->desc); |
| 2760 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; |
| 2761 | if (ep_state & EP_HAS_STREAMS || |
| 2762 | ep_state & EP_GETTING_STREAMS) { |
| 2763 | xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " |
| 2764 | "already has streams set up.\n", |
| 2765 | ep->desc.bEndpointAddress); |
| 2766 | xhci_warn(xhci, "Send email to xHCI maintainer and ask for " |
| 2767 | "dynamic stream context array reallocation.\n"); |
| 2768 | return -EINVAL; |
| 2769 | } |
| 2770 | if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { |
| 2771 | xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " |
| 2772 | "endpoint 0x%x; URBs are pending.\n", |
| 2773 | ep->desc.bEndpointAddress); |
| 2774 | return -EINVAL; |
| 2775 | } |
| 2776 | return 0; |
| 2777 | } |
| 2778 | |
| 2779 | static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, |
| 2780 | unsigned int *num_streams, unsigned int *num_stream_ctxs) |
| 2781 | { |
| 2782 | unsigned int max_streams; |
| 2783 | |
| 2784 | /* The stream context array size must be a power of two */ |
| 2785 | *num_stream_ctxs = roundup_pow_of_two(*num_streams); |
| 2786 | /* |
| 2787 | * Find out how many primary stream array entries the host controller |
| 2788 | * supports. Later we may use secondary stream arrays (similar to 2nd |
| 2789 | * level page entries), but that's an optional feature for xHCI host |
| 2790 | * controllers. xHCs must support at least 4 stream IDs. |
| 2791 | */ |
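| | /* Hypothetical example: a driver asking for 16 streams arrives here |
| | * with *num_streams = 17 (stream 0 included), so *num_stream_ctxs |
| | * rounds up to 32; if the host only supports 16 primary entries, both |
| | * values are clamped to 16 below. |
| | */ |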
| 2792 | max_streams = HCC_MAX_PSA(xhci->hcc_params); |
| 2793 | if (*num_stream_ctxs > max_streams) { |
| 2794 | xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", |
| 2795 | max_streams); |
| 2796 | *num_stream_ctxs = max_streams; |
| 2797 | *num_streams = max_streams; |
| 2798 | } |
| 2799 | } |
| 2800 | |
| 2801 | /* Returns an error code if one of the endpoints already has streams. |
| 2802 | * This does not change any data structures, it only checks and gathers |
| 2803 | * information. |
| 2804 | */ |
| 2805 | static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, |
| 2806 | struct usb_device *udev, |
| 2807 | struct usb_host_endpoint **eps, unsigned int num_eps, |
| 2808 | unsigned int *num_streams, u32 *changed_ep_bitmask) |
| 2809 | { |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2810 | unsigned int max_streams; |
| 2811 | unsigned int endpoint_flag; |
| 2812 | int i; |
| 2813 | int ret; |
| 2814 | |
| 2815 | for (i = 0; i < num_eps; i++) { |
| 2816 | ret = xhci_check_streams_endpoint(xhci, udev, |
| 2817 | eps[i], udev->slot_id); |
| 2818 | if (ret < 0) |
| 2819 | return ret; |
| 2820 | |
Felipe Balbi | 18b7ede | 2012-01-02 13:35:41 +0200 | [diff] [blame] | 2821 | max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); |
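| | /* usb_ss_max_streams() counts only driver-usable streams, while |
| | * *num_streams still includes reserved stream 0; hence the -1/+1 |
| | * adjustment below. |
| | */ |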
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2822 | if (max_streams < (*num_streams - 1)) { |
| 2823 | xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", |
| 2824 | eps[i]->desc.bEndpointAddress, |
| 2825 | max_streams); |
| 2826 | *num_streams = max_streams + 1; |
| 2827 | } |
| 2828 | |
| 2829 | endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); |
| 2830 | if (*changed_ep_bitmask & endpoint_flag) |
| 2831 | return -EINVAL; |
| 2832 | *changed_ep_bitmask |= endpoint_flag; |
| 2833 | } |
| 2834 | return 0; |
| 2835 | } |
| 2836 | |
| 2837 | static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, |
| 2838 | struct usb_device *udev, |
| 2839 | struct usb_host_endpoint **eps, unsigned int num_eps) |
| 2840 | { |
| 2841 | u32 changed_ep_bitmask = 0; |
| 2842 | unsigned int slot_id; |
| 2843 | unsigned int ep_index; |
| 2844 | unsigned int ep_state; |
| 2845 | int i; |
| 2846 | |
| 2847 | slot_id = udev->slot_id; |
| 2848 | if (!xhci->devs[slot_id]) |
| 2849 | return 0; |
| 2850 | |
| 2851 | for (i = 0; i < num_eps; i++) { |
| 2852 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 2853 | ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; |
| 2854 | /* Are streams already being freed for the endpoint? */ |
| 2855 | if (ep_state & EP_GETTING_NO_STREAMS) { |
| 2856 | xhci_warn(xhci, "WARN Can't disable streams for " |
| 2857 | "endpoint 0x%x\n, " |
| 2858 | "streams are being disabled already.", |
| 2859 | eps[i]->desc.bEndpointAddress); |
| 2860 | return 0; |
| 2861 | } |
| 2862 | /* Are there actually any streams to free? */ |
| 2863 | if (!(ep_state & EP_HAS_STREAMS) && |
| 2864 | !(ep_state & EP_GETTING_STREAMS)) { |
| 2865 | xhci_warn(xhci, "WARN Can't disable streams for " |
| 2866 | "endpoint 0x%x\n, " |
| 2867 | "streams are already disabled!", |
| 2868 | eps[i]->desc.bEndpointAddress); |
| 2869 | xhci_warn(xhci, "WARN xhci_free_streams() called " |
| 2870 | "with non-streams endpoint\n"); |
| 2871 | return 0; |
| 2872 | } |
| 2873 | changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); |
| 2874 | } |
| 2875 | return changed_ep_bitmask; |
| 2876 | } |
| 2877 | |
| 2878 | /* |
| 2879 | * The USB device drivers use this function (through the HCD interface in USB |
| 2880 | * core) to prepare a set of bulk endpoints to use streams. Streams are used to |
| 2881 | * coordinate mass storage command queueing across multiple endpoints (basically |
| 2882 | * a stream ID == a task ID). |
| 2883 | * |
| 2884 | * Setting up streams involves allocating the same size stream context array |
| 2885 | * for each endpoint and issuing a configure endpoint command for all endpoints. |
| 2886 | * |
| 2887 | * Don't allow the call to succeed if one endpoint only supports one stream |
| 2888 | * (which means it doesn't support streams at all). |
| 2889 | * |
| 2890 | * Drivers may get fewer stream IDs than they asked for, if the host controller |
| 2891 | * hardware or endpoints claim they can't support the number of requested |
| 2892 | * stream IDs. |
| 2893 | */ |
| 2894 | int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, |
| 2895 | struct usb_host_endpoint **eps, unsigned int num_eps, |
| 2896 | unsigned int num_streams, gfp_t mem_flags) |
| 2897 | { |
| 2898 | int i, ret; |
| 2899 | struct xhci_hcd *xhci; |
| 2900 | struct xhci_virt_device *vdev; |
| 2901 | struct xhci_command *config_cmd; |
| 2902 | unsigned int ep_index; |
| 2903 | unsigned int num_stream_ctxs; |
| 2904 | unsigned long flags; |
| 2905 | u32 changed_ep_bitmask = 0; |
| 2906 | |
| 2907 | if (!eps) |
| 2908 | return -EINVAL; |
| 2909 | |
| 2910 | /* Add one to the number of streams requested to account for |
| 2911 | * stream 0 that is reserved for xHCI usage. |
| 2912 | */ |
| 2913 | num_streams += 1; |
| 2914 | xhci = hcd_to_xhci(hcd); |
| 2915 | xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", |
| 2916 | num_streams); |
| 2917 | |
| 2918 | config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); |
| 2919 | if (!config_cmd) { |
| 2920 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); |
| 2921 | return -ENOMEM; |
| 2922 | } |
| 2923 | |
| 2924 | /* Check to make sure all endpoints are not already configured for |
| 2925 | * streams. While we're at it, find the maximum number of streams that |
| 2926 | * all the endpoints will support and check for duplicate endpoints. |
| 2927 | */ |
| 2928 | spin_lock_irqsave(&xhci->lock, flags); |
| 2929 | ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, |
| 2930 | num_eps, &num_streams, &changed_ep_bitmask); |
| 2931 | if (ret < 0) { |
| 2932 | xhci_free_command(xhci, config_cmd); |
| 2933 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2934 | return ret; |
| 2935 | } |
| 2936 | if (num_streams <= 1) { |
| 2937 | xhci_warn(xhci, "WARN: endpoints can't handle " |
| 2938 | "more than one stream.\n"); |
| 2939 | xhci_free_command(xhci, config_cmd); |
| 2940 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2941 | return -EINVAL; |
| 2942 | } |
| 2943 | vdev = xhci->devs[udev->slot_id]; |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 2944 | /* Mark each endpoint as being in transition, so |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 2945 | * xhci_urb_enqueue() will reject all URBs. |
| 2946 | */ |
| 2947 | for (i = 0; i < num_eps; i++) { |
| 2948 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 2949 | vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; |
| 2950 | } |
| 2951 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2952 | |
| 2953 | /* Set up internal data structures and allocate HW data structures for |
| 2954 | * streams (but don't install the HW structures in the input context |
| 2955 | * until we're sure all memory allocation succeeded). |
| 2956 | */ |
| 2957 | xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); |
| 2958 | xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", |
| 2959 | num_stream_ctxs, num_streams); |
| 2960 | |
| 2961 | for (i = 0; i < num_eps; i++) { |
| 2962 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 2963 | vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, |
| 2964 | num_stream_ctxs, |
| 2965 | num_streams, mem_flags); |
| 2966 | if (!vdev->eps[ep_index].stream_info) |
| 2967 | goto cleanup; |
| 2968 | /* Set maxPstreams in endpoint context and update deq ptr to |
| 2969 | * point to stream context array. FIXME |
| 2970 | */ |
| 2971 | } |
| 2972 | |
| 2973 | /* Set up the input context for a configure endpoint command. */ |
| 2974 | for (i = 0; i < num_eps; i++) { |
| 2975 | struct xhci_ep_ctx *ep_ctx; |
| 2976 | |
| 2977 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 2978 | ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); |
| 2979 | |
| 2980 | xhci_endpoint_copy(xhci, config_cmd->in_ctx, |
| 2981 | vdev->out_ctx, ep_index); |
| 2982 | xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, |
| 2983 | vdev->eps[ep_index].stream_info); |
| 2984 | } |
| 2985 | /* Tell the HW to drop its old copy of the endpoint context info |
| 2986 | * and add the updated copy from the input context. |
| 2987 | */ |
| 2988 | xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, |
| 2989 | vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); |
| 2990 | |
| 2991 | /* Issue and wait for the configure endpoint command */ |
| 2992 | ret = xhci_configure_endpoint(xhci, udev, config_cmd, |
| 2993 | false, false); |
| 2994 | |
| 2995 | /* xHC rejected the configure endpoint command for some reason, so we |
| 2996 | * leave the old ring intact and free our internal streams data |
| 2997 | * structure. |
| 2998 | */ |
| 2999 | if (ret < 0) |
| 3000 | goto cleanup; |
| 3001 | |
| 3002 | spin_lock_irqsave(&xhci->lock, flags); |
| 3003 | for (i = 0; i < num_eps; i++) { |
| 3004 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 3005 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; |
| 3006 | xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", |
| 3007 | udev->slot_id, ep_index); |
| 3008 | vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; |
| 3009 | } |
| 3010 | xhci_free_command(xhci, config_cmd); |
| 3011 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3012 | |
| 3013 | /* Subtract 1 for stream 0, which drivers can't use */ |
| 3014 | return num_streams - 1; |
| 3015 | |
| 3016 | cleanup: |
| 3017 | /* If it didn't work, free the streams! */ |
| 3018 | for (i = 0; i < num_eps; i++) { |
| 3019 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 3020 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); |
Sarah Sharp | 8a00774 | 2010-04-30 15:37:56 -0700 | [diff] [blame] | 3021 | vdev->eps[ep_index].stream_info = NULL; |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 3022 | /* FIXME Unset maxPstreams in endpoint context and |
| 3023 | * update deq ptr to point to the normal endpoint ring. |
| 3024 | */ |
| 3025 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; |
| 3026 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; |
| 3027 | xhci_endpoint_zero(xhci, vdev, eps[i]); |
| 3028 | } |
| 3029 | xhci_free_command(xhci, config_cmd); |
| 3030 | return -ENOMEM; |
| 3031 | } |
| 3032 | |
| 3033 | /* Transition the endpoint from using streams to being a "normal" endpoint |
| 3034 | * without streams. |
| 3035 | * |
| 3036 | * Modify the endpoint context state, submit a configure endpoint command, |
| 3037 | * and free all endpoint rings for streams if that completes successfully. |
| 3038 | */ |
| 3039 | int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, |
| 3040 | struct usb_host_endpoint **eps, unsigned int num_eps, |
| 3041 | gfp_t mem_flags) |
| 3042 | { |
| 3043 | int i, ret; |
| 3044 | struct xhci_hcd *xhci; |
| 3045 | struct xhci_virt_device *vdev; |
| 3046 | struct xhci_command *command; |
| 3047 | unsigned int ep_index; |
| 3048 | unsigned long flags; |
| 3049 | u32 changed_ep_bitmask; |
| 3050 | |
| 3051 | xhci = hcd_to_xhci(hcd); |
| 3052 | vdev = xhci->devs[udev->slot_id]; |
| 3053 | |
| 3054 | /* Set up a configure endpoint command to remove the stream rings */ |
| 3055 | spin_lock_irqsave(&xhci->lock, flags); |
| 3056 | changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, |
| 3057 | udev, eps, num_eps); |
| 3058 | if (changed_ep_bitmask == 0) { |
| 3059 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3060 | return -EINVAL; |
| 3061 | } |
| 3062 | |
| 3063 | /* Use the xhci_command structure from the first endpoint. We may have |
| 3064 | * allocated too many, but the driver may call xhci_free_streams() for |
| 3065 | * each endpoint it grouped into one call to xhci_alloc_streams(). |
| 3066 | */ |
| 3067 | ep_index = xhci_get_endpoint_index(&eps[0]->desc); |
| 3068 | command = vdev->eps[ep_index].stream_info->free_streams_command; |
| 3069 | for (i = 0; i < num_eps; i++) { |
| 3070 | struct xhci_ep_ctx *ep_ctx; |
| 3071 | |
| 3072 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 3073 | ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); |
| 3074 | xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= |
| 3075 | EP_GETTING_NO_STREAMS; |
| 3076 | |
| 3077 | xhci_endpoint_copy(xhci, command->in_ctx, |
| 3078 | vdev->out_ctx, ep_index); |
| 3079 | xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx, |
| 3080 | &vdev->eps[ep_index]); |
| 3081 | } |
| 3082 | xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, |
| 3083 | vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); |
| 3084 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3085 | |
| 3086 | /* Issue and wait for the configure endpoint command, |
| 3087 | * which must succeed. |
| 3088 | */ |
| 3089 | ret = xhci_configure_endpoint(xhci, udev, command, |
| 3090 | false, true); |
| 3091 | |
| 3092 | /* xHC rejected the configure endpoint command for some reason, so we |
| 3093 | * leave the stream rings intact. |
| 3094 | */ |
| 3095 | if (ret < 0) |
| 3096 | return ret; |
| 3097 | |
| 3098 | spin_lock_irqsave(&xhci->lock, flags); |
| 3099 | for (i = 0; i < num_eps; i++) { |
| 3100 | ep_index = xhci_get_endpoint_index(&eps[i]->desc); |
| 3101 | xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); |
Sarah Sharp | 8a00774 | 2010-04-30 15:37:56 -0700 | [diff] [blame] | 3102 | vdev->eps[ep_index].stream_info = NULL; |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 3103 | /* FIXME Unset maxPstreams in endpoint context and |
| 3104 | * update deq ptr to point to the normal endpoint ring. |
| 3105 | */ |
| 3106 | vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; |
| 3107 | vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; |
| 3108 | } |
| 3109 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3110 | |
| 3111 | return 0; |
| 3112 | } |
| 3113 | |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3114 | /* |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3115 | * Deletes endpoint resources for endpoints that were active before a Reset |
| 3116 | * Device command, or a Disable Slot command. The Reset Device command leaves |
| 3117 | * the control endpoint intact, whereas the Disable Slot command deletes it. |
| 3118 | * |
| 3119 | * Must be called with xhci->lock held. |
| 3120 | */ |
| 3121 | void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, |
| 3122 | struct xhci_virt_device *virt_dev, bool drop_control_ep) |
| 3123 | { |
| 3124 | int i; |
| 3125 | unsigned int num_dropped_eps = 0; |
| 3126 | unsigned int drop_flags = 0; |
| 3127 | |
| 3128 | for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { |
| 3129 | if (virt_dev->eps[i].ring) { |
| 3130 | drop_flags |= 1 << i; |
| 3131 | num_dropped_eps++; |
| 3132 | } |
| 3133 | } |
| 3134 | xhci->num_active_eps -= num_dropped_eps; |
| 3135 | if (num_dropped_eps) |
| 3136 | xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, " |
| 3137 | "%u now active.\n", |
| 3138 | num_dropped_eps, drop_flags, |
| 3139 | xhci->num_active_eps); |
| 3140 | } |
| 3141 | |
| 3142 | /* |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3143 | * This submits a Reset Device Command, which will set the device state to 0, |
| 3144 | * set the device address to 0, and disable all the endpoints except the default |
| 3145 | * control endpoint. The USB core should come back and call |
| 3146 | * xhci_address_device(), and then re-set up the configuration. If this is |
| 3147 | * called because of a usb_reset_and_verify_device(), then the old alternate |
| 3148 | * settings will be re-installed through the normal bandwidth allocation |
| 3149 | * functions. |
| 3150 | * |
| 3151 | * Wait for the Reset Device command to finish. Remove all structures |
| 3152 | * associated with the endpoints that were disabled. Clear the input device |
| 3153 | * structure? Cache the rings? Reset the control endpoint 0 max packet size? |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3154 | * |
| 3155 | * If the virt_dev to be reset does not exist or does not match the udev, |
| 3156 | * it means the device is lost, possibly due to an xHC restore error and |
| 3157 | * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to |
| 3158 | * re-allocate the device. |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3159 | */ |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3160 | int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3161 | { |
| 3162 | int ret, i; |
| 3163 | unsigned long flags; |
| 3164 | struct xhci_hcd *xhci; |
| 3165 | unsigned int slot_id; |
| 3166 | struct xhci_virt_device *virt_dev; |
| 3167 | struct xhci_command *reset_device_cmd; |
| 3168 | int timeleft; |
| 3169 | int last_freed_endpoint; |
Maarten Lankhorst | 001fd38 | 2011-06-01 23:27:50 +0200 | [diff] [blame] | 3170 | struct xhci_slot_ctx *slot_ctx; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 3171 | int old_active_eps = 0; |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3172 | |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3173 | ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3174 | if (ret <= 0) |
| 3175 | return ret; |
| 3176 | xhci = hcd_to_xhci(hcd); |
| 3177 | slot_id = udev->slot_id; |
| 3178 | virt_dev = xhci->devs[slot_id]; |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3179 | if (!virt_dev) { |
| 3180 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " |
| 3181 | "not exist. Re-allocate the device\n", slot_id); |
| 3182 | ret = xhci_alloc_dev(hcd, udev); |
| 3183 | if (ret == 1) |
| 3184 | return 0; |
| 3185 | else |
| 3186 | return -EINVAL; |
| 3187 | } |
| 3188 | |
| 3189 | if (virt_dev->udev != udev) { |
| 3190 | /* If the virt_dev and the udev do not match, this virt_dev |
| 3191 | * may belong to another udev. |
| 3192 | * Re-allocate the device. |
| 3193 | */ |
| 3194 | xhci_dbg(xhci, "The device to be reset with slot ID %u does " |
| 3195 | "not match the udev. Re-allocate the device\n", |
| 3196 | slot_id); |
| 3197 | ret = xhci_alloc_dev(hcd, udev); |
| 3198 | if (ret == 1) |
| 3199 | return 0; |
| 3200 | else |
| 3201 | return -EINVAL; |
| 3202 | } |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3203 | |
Maarten Lankhorst | 001fd38 | 2011-06-01 23:27:50 +0200 | [diff] [blame] | 3204 | /* If device is not setup, there is no point in resetting it */ |
| 3205 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
| 3206 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == |
| 3207 | SLOT_STATE_DISABLED) |
| 3208 | return 0; |
| 3209 | |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3210 | xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); |
| 3211 | /* Allocate the command structure that holds the struct completion. |
| 3212 | * Assume we're in process context, since the normal device reset |
| 3213 | * process has to wait for the device anyway. Storage devices are |
| 3214 | * reset as part of error handling, so use GFP_NOIO instead of |
| 3215 | * GFP_KERNEL. |
| 3216 | */ |
| 3217 | reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); |
| 3218 | if (!reset_device_cmd) { |
| 3219 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); |
| 3220 | return -ENOMEM; |
| 3221 | } |
| 3222 | |
| 3223 | /* Attempt to submit the Reset Device command to the command ring */ |
| 3224 | spin_lock_irqsave(&xhci->lock, flags); |
| 3225 | reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; |
Paul Zimmerman | 7a3783e | 2010-11-17 16:26:50 -0800 | [diff] [blame] | 3226 | |
| 3227 | /* The enqueue pointer can be left pointing to the link TRB at the end
| 3228 | * of a ring segment; in that case the queued command actually starts at
| 3229 | * the first TRB of the next segment, so wait on that TRB instead.
| | */
Matt Evans | f5960b6 | 2011-06-01 10:22:55 +1000 | [diff] [blame] | 3230 | if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control)) |
Paul Zimmerman | 7a3783e | 2010-11-17 16:26:50 -0800 | [diff] [blame] | 3231 | reset_device_cmd->command_trb = |
| 3232 | xhci->cmd_ring->enq_seg->next->trbs; |
| 3233 | |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3234 | list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); |
| 3235 | ret = xhci_queue_reset_device(xhci, slot_id); |
| 3236 | if (ret) { |
| 3237 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
| 3238 | list_del(&reset_device_cmd->cmd_list); |
| 3239 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3240 | goto command_cleanup; |
| 3241 | } |
| 3242 | xhci_ring_cmd_db(xhci); |
| 3243 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3244 | |
| 3245 | /* Wait for the Reset Device command to finish */ |
| 3246 | timeleft = wait_for_completion_interruptible_timeout( |
| 3247 | reset_device_cmd->completion, |
| 3248 | USB_CTRL_SET_TIMEOUT); |
| 3249 | if (timeleft <= 0) { |
| 3250 | xhci_warn(xhci, "%s while waiting for reset device command\n", |
| 3251 | timeleft == 0 ? "Timeout" : "Signal"); |
| 3252 | spin_lock_irqsave(&xhci->lock, flags); |
| 3253 | /* The timeout might have raced with the event ring handler, so |
| 3254 | * only delete from the list if the item isn't poisoned. |
| 3255 | */ |
| 3256 | if (reset_device_cmd->cmd_list.next != LIST_POISON1) |
| 3257 | list_del(&reset_device_cmd->cmd_list); |
| 3258 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3259 | ret = -ETIME; |
| 3260 | goto command_cleanup; |
| 3261 | } |
| 3262 | |
| 3263 | /* The Reset Device command can't fail, according to the 0.95/0.96 spec, |
| 3264 | * unless we tried to reset a slot ID that wasn't enabled, |
| 3265 | * or the device wasn't in the addressed or configured state. |
| 3266 | */ |
| 3267 | ret = reset_device_cmd->status; |
| 3268 | switch (ret) { |
| 3269 | case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ |
| 3270 | case COMP_CTX_STATE: /* 0.96 completion code for same thing */ |
| 3271 | xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", |
| 3272 | slot_id, |
| 3273 | xhci_get_slot_state(xhci, virt_dev->out_ctx)); |
| 3274 | xhci_info(xhci, "Not freeing device rings.\n"); |
| 3275 | /* Don't treat this as an error. May change my mind later. */ |
| 3276 | ret = 0; |
| 3277 | goto command_cleanup; |
| 3278 | case COMP_SUCCESS: |
| 3279 | xhci_dbg(xhci, "Successful reset device command.\n"); |
| 3280 | break; |
| 3281 | default: |
| 3282 | if (xhci_is_vendor_info_code(xhci, ret)) |
| 3283 | break; |
| 3284 | xhci_warn(xhci, "Unknown completion code %u for " |
| 3285 | "reset device command.\n", ret); |
| 3286 | ret = -EINVAL; |
| 3287 | goto command_cleanup; |
| 3288 | } |
| 3289 | |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3290 | /* Free up host controller endpoint resources */ |
| 3291 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
| 3292 | spin_lock_irqsave(&xhci->lock, flags); |
| 3293 | /* Don't delete the default control endpoint resources */ |
| 3294 | xhci_free_device_endpoint_resources(xhci, virt_dev, false); |
| 3295 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3296 | } |
| 3297 | |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3298 | /* Everything but endpoint 0 is disabled, so free or cache the rings. */ |
| 3299 | last_freed_endpoint = 1; |
| 3300 | for (i = 1; i < 31; ++i) { |
Dmitry Torokhov | 2dea75d | 2011-04-12 23:06:28 -0700 | [diff] [blame] | 3301 | struct xhci_virt_ep *ep = &virt_dev->eps[i]; |
| 3302 | |
| 3303 | if (ep->ep_state & EP_HAS_STREAMS) { |
| 3304 | xhci_free_stream_info(xhci, ep->stream_info); |
| 3305 | ep->stream_info = NULL; |
| 3306 | ep->ep_state &= ~EP_HAS_STREAMS; |
| 3307 | } |
| 3308 | |
| 3309 | if (ep->ring) { |
| 3310 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); |
| 3311 | last_freed_endpoint = i; |
| 3312 | } |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 3313 | if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) |
| 3314 | xhci_drop_ep_from_interval_table(xhci, |
| 3315 | &virt_dev->eps[i].bw_info, |
| 3316 | virt_dev->bw_table, |
| 3317 | udev, |
| 3318 | &virt_dev->eps[i], |
| 3319 | virt_dev->tt_info); |
Sarah Sharp | 9af5d71 | 2011-09-02 11:05:48 -0700 | [diff] [blame] | 3320 | xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3321 | } |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 3322 | /* If necessary, update the number of active TTs on this root port */ |
| 3323 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); |
| 3324 | |
Sarah Sharp | 2a8f82c | 2009-12-09 15:59:13 -0800 | [diff] [blame] | 3325 | xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); |
| 3326 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); |
| 3327 | ret = 0; |
| 3328 | |
| 3329 | command_cleanup: |
| 3330 | xhci_free_command(xhci, reset_device_cmd); |
| 3331 | return ret; |
| 3332 | } |
| 3333 | |
| 3334 | /* |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3335 | * At this point, the struct usb_device is about to go away, the device has |
| 3336 | * disconnected, and all traffic has been stopped and the endpoints have been |
| 3337 | * disabled. Free any HC data structures associated with that device. |
| 3338 | */ |
| 3339 | void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) |
| 3340 | { |
| 3341 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 3342 | struct xhci_virt_device *virt_dev; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3343 | unsigned long flags; |
Sarah Sharp | c526d0d | 2009-09-16 16:42:39 -0700 | [diff] [blame] | 3344 | u32 state; |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 3345 | int i, ret; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3346 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 3347 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
Sarah Sharp | 7bd89b4 | 2011-07-01 13:35:40 -0700 | [diff] [blame] | 3348 | /* If the host is halted due to driver unload, we still need to free the |
| 3349 | * device. |
| 3350 | */ |
| 3351 | if (ret <= 0 && ret != -ENODEV) |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3352 | return; |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 3353 | |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 3354 | virt_dev = xhci->devs[udev->slot_id]; |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 3355 | |
| 3356 | /* Stop any wayward timer functions (which may grab the lock) */ |
| 3357 | for (i = 0; i < 31; ++i) { |
| 3358 | virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; |
| 3359 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); |
| 3360 | } |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3361 | |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 3362 | if (udev->usb2_hw_lpm_enabled) { |
| 3363 | xhci_set_usb2_hardware_lpm(hcd, udev, 0); |
| 3364 | udev->usb2_hw_lpm_enabled = 0; |
| 3365 | } |
| 3366 | |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3367 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | c526d0d | 2009-09-16 16:42:39 -0700 | [diff] [blame] | 3368 | /* Don't disable the slot if the host controller is dead. */ |
| 3369 | state = xhci_readl(xhci, &xhci->op_regs->status); |
Sarah Sharp | 7bd89b4 | 2011-07-01 13:35:40 -0700 | [diff] [blame] | 3370 | if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || |
| 3371 | (xhci->xhc_state & XHCI_STATE_HALTED)) { |
Sarah Sharp | c526d0d | 2009-09-16 16:42:39 -0700 | [diff] [blame] | 3372 | xhci_free_virt_device(xhci, udev->slot_id); |
| 3373 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3374 | return; |
| 3375 | } |
| 3376 | |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 3377 | if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3378 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3379 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
| 3380 | return; |
| 3381 | } |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 3382 | xhci_ring_cmd_db(xhci); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3383 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3384 | /* |
| 3385 | * Event command completion handler will free any data structures |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 3386 | * associated with the slot. XXX Can free sleep? |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3387 | */ |
| 3388 | } |
| 3389 | |
| 3390 | /* |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3391 | * Checks if we have enough host controller resources for the default control |
| 3392 | * endpoint. |
| 3393 | * |
| 3394 | * Must be called with xhci->lock held. |
| 3395 | */ |
| 3396 | static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) |
| 3397 | { |
| 3398 | if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { |
| 3399 | xhci_dbg(xhci, "Not enough ep ctxs: " |
| 3400 | "%u active, need to add 1, limit is %u.\n", |
| 3401 | xhci->num_active_eps, xhci->limit_active_eps); |
| 3402 | return -ENOMEM; |
| 3403 | } |
| 3404 | xhci->num_active_eps += 1; |
| 3405 | xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n", |
| 3406 | xhci->num_active_eps); |
| 3407 | return 0; |
| 3408 | } |
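| | /*
| |  * Worked example (illustrative values): with limit_active_eps = 64 and
| |  * num_active_eps = 64, 64 + 1 > 64, so the reservation fails with
| |  * -ENOMEM and the active endpoint count is left unchanged.
| |  */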
| 3409 | |
| 3410 | |
| 3411 | /* |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3412 | * Returns 0 if the xHC ran out of device slots, the Enable Slot command |
| 3413 | * timed out, or allocating memory failed. Returns 1 on success. |
| 3414 | */ |
| 3415 | int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) |
| 3416 | { |
| 3417 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 3418 | unsigned long flags; |
| 3419 | int timeleft; |
| 3420 | int ret; |
| 3421 | |
| 3422 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 3423 | ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3424 | if (ret) { |
| 3425 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3426 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
| 3427 | return 0; |
| 3428 | } |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 3429 | xhci_ring_cmd_db(xhci); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3430 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3431 | |
| 3432 | /* XXX: how much time for xHC slot assignment? */ |
| 3433 | timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, |
| 3434 | USB_CTRL_SET_TIMEOUT); |
| 3435 | if (timeleft <= 0) { |
| 3436 | xhci_warn(xhci, "%s while waiting for a slot\n", |
| 3437 | timeleft == 0 ? "Timeout" : "Signal"); |
| 3438 | /* FIXME cancel the enable slot request */ |
| 3439 | return 0; |
| 3440 | } |
| 3441 | |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3442 | if (!xhci->slot_id) { |
| 3443 | xhci_err(xhci, "Error while assigning device slot ID\n"); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3444 | return 0; |
| 3445 | } |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3446 | |
| 3447 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
| 3448 | spin_lock_irqsave(&xhci->lock, flags); |
| 3449 | ret = xhci_reserve_host_control_ep_resources(xhci); |
| 3450 | if (ret) { |
| 3451 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3452 | xhci_warn(xhci, "Not enough host resources, " |
| 3453 | "active endpoint contexts = %u\n", |
| 3454 | xhci->num_active_eps); |
| 3455 | goto disable_slot; |
| 3456 | } |
| 3457 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3458 | } |
| 3459 | /* Use GFP_NOIO, since this function can be called from |
Sarah Sharp | a6d940d | 2010-12-28 13:08:42 -0800 | [diff] [blame] | 3460 | * xhci_discover_or_reset_device(), which may be called as part of |
| 3461 | * mass storage driver error handling. |
| 3462 | */ |
| 3463 | if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3464 | xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3465 | goto disable_slot; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3466 | } |
| 3467 | udev->slot_id = xhci->slot_id; |
| 3468 | /* Is this a LS or FS device under a HS hub? */ |
| 3469 | /* Hub or peripheral? */
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3470 | return 1; |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3471 | |
| 3472 | disable_slot: |
| 3473 | /* Disable slot, if we can do it without mem alloc */ |
| 3474 | spin_lock_irqsave(&xhci->lock, flags); |
| 3475 | if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) |
| 3476 | xhci_ring_cmd_db(xhci); |
| 3477 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3478 | return 0; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3479 | } |
| 3480 | |
| 3481 | /* |
| 3482 | * Issue an Address Device command (which will issue a SetAddress request to |
| 3483 | * the device). |
| 3484 | * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so |
| 3485 | * we should only issue and wait on one address command at a time.
| 3486 | * |
| 3487 | * We add one to the device address issued by the hardware because the USB core |
| 3488 | * uses address 1 for the root hubs (even though they're not really devices). |
| 3489 | */ |
| 3490 | int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) |
| 3491 | { |
| 3492 | unsigned long flags; |
| 3493 | int timeleft; |
| 3494 | struct xhci_virt_device *virt_dev; |
| 3495 | int ret = 0; |
| 3496 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3497 | struct xhci_slot_ctx *slot_ctx; |
| 3498 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | 8e595a5 | 2009-07-27 12:03:31 -0700 | [diff] [blame] | 3499 | u64 temp_64; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3500 | |
| 3501 | if (!udev->slot_id) { |
| 3502 | xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); |
| 3503 | return -EINVAL; |
| 3504 | } |
| 3505 | |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3506 | virt_dev = xhci->devs[udev->slot_id]; |
| 3507 | |
Matt Evans | 7ed603e | 2011-03-29 13:40:56 +1100 | [diff] [blame] | 3508 | if (WARN_ON(!virt_dev)) { |
| 3509 | /* |
| 3510 | * In a plug/unplug torture test with an NEC controller,
| 3511 | * a NULL-pointer dereference was observed once because virt_dev was NULL.
| 3512 | * Print useful debug rather than crash if it is observed again! |
| 3513 | */ |
| 3514 | xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", |
| 3515 | udev->slot_id); |
| 3516 | return -EINVAL; |
| 3517 | } |
| 3518 | |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3519 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
| 3520 | /* |
| 3521 | * If this is the first Set Address since device plug-in or |
| 3522 | * virt_device reallocation after a resume with an xHCI power loss,
| 3523 | * then set up the slot context. |
| 3524 | */ |
| 3525 | if (!slot_ctx->dev_info) |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3526 | xhci_setup_addressable_virt_dev(xhci, udev); |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3527 | /* Otherwise, update the control endpoint ring enqueue pointer. */ |
Sarah Sharp | 2d1ee59 | 2010-07-09 17:08:54 +0200 | [diff] [blame] | 3528 | else |
| 3529 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); |
Sarah Sharp | d31c285 | 2011-11-03 13:06:08 -0700 | [diff] [blame] | 3530 | ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); |
| 3531 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); |
| 3532 | ctrl_ctx->drop_flags = 0; |
| 3533 | |
Sarah Sharp | 66e49d8 | 2009-07-27 12:03:46 -0700 | [diff] [blame] | 3534 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3535 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3536 | |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 3537 | spin_lock_irqsave(&xhci->lock, flags); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3538 | ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, |
| 3539 | udev->slot_id); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3540 | if (ret) { |
| 3541 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3542 | xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); |
| 3543 | return ret; |
| 3544 | } |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 3545 | xhci_ring_cmd_db(xhci); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3546 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3547 | |
| 3548 | /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ |
| 3549 | timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, |
| 3550 | USB_CTRL_SET_TIMEOUT); |
| 3551 | /* FIXME: From section 4.3.4: "Software shall be responsible for timing |
| 3552 | * the SetAddress() "recovery interval" required by USB and aborting the |
| 3553 | * command on a timeout."
| 3554 | */ |
| 3555 | if (timeleft <= 0) { |
Andiry Xu | cd68176 | 2011-09-23 14:19:55 -0700 | [diff] [blame] | 3556 | xhci_warn(xhci, "%s while waiting for address device command\n", |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3557 | timeleft == 0 ? "Timeout" : "Signal"); |
| 3558 | /* FIXME cancel the address device command */ |
| 3559 | return -ETIME; |
| 3560 | } |
| 3561 | |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3562 | switch (virt_dev->cmd_status) { |
| 3563 | case COMP_CTX_STATE: |
| 3564 | case COMP_EBADSLT: |
| 3565 | xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", |
| 3566 | udev->slot_id); |
| 3567 | ret = -EINVAL; |
| 3568 | break; |
| 3569 | case COMP_TX_ERR: |
| 3570 | dev_warn(&udev->dev, "Device not responding to set address.\n"); |
| 3571 | ret = -EPROTO; |
| 3572 | break; |
Alex He | f6ba6fe | 2011-06-08 18:34:06 +0800 | [diff] [blame] | 3573 | case COMP_DEV_ERR: |
| 3574 | dev_warn(&udev->dev, "ERROR: Incompatible device for address " |
| 3575 | "device command.\n"); |
| 3576 | ret = -ENODEV; |
| 3577 | break; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3578 | case COMP_SUCCESS: |
| 3579 | xhci_dbg(xhci, "Successful Address Device command\n"); |
| 3580 | break; |
| 3581 | default: |
| 3582 | xhci_err(xhci, "ERROR: unexpected command completion " |
| 3583 | "code 0x%x.\n", virt_dev->cmd_status); |
Sarah Sharp | 66e49d8 | 2009-07-27 12:03:46 -0700 | [diff] [blame] | 3584 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3585 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3586 | ret = -EINVAL; |
| 3587 | break; |
| 3588 | } |
| 3589 | if (ret) { |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3590 | return ret; |
| 3591 | } |
Sarah Sharp | 8e595a5 | 2009-07-27 12:03:31 -0700 | [diff] [blame] | 3592 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
| 3593 | xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); |
| 3594 | xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 3595 | udev->slot_id, |
| 3596 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], |
| 3597 | (unsigned long long) |
| 3598 | le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 3599 | xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3600 | (unsigned long long)virt_dev->out_ctx->dma); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3601 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3602 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3603 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3604 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3605 | /* |
| 3606 | * USB core uses address 1 for the roothubs, so we add one to the |
| 3607 | * address given back to us by the HC. |
| 3608 | */ |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3609 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
Andiry Xu | c8d4af8 | 2010-10-14 07:22:51 -0700 | [diff] [blame] | 3610 | /* Use the kernel-assigned address for devices; store the xHC-assigned
| 3611 | * address locally. */
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 3612 | virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) |
| 3613 | + 1; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 3614 | /* Zero the input context control for later use */ |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3615 | ctrl_ctx->add_flags = 0; |
| 3616 | ctrl_ctx->drop_flags = 0; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3617 | |
Andiry Xu | c8d4af8 | 2010-10-14 07:22:51 -0700 | [diff] [blame] | 3618 | xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3619 | |
| 3620 | return 0; |
| 3621 | } |
| 3622 | |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 3623 | #ifdef CONFIG_USB_SUSPEND |
| 3624 | |
| 3625 | /* BESL to HIRD Encoding array for USB2 LPM */ |
| 3626 | static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, |
| 3627 | 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; |
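| | /*
| |  * Each of the 16 entries above is the latency value (in microseconds)
| |  * corresponding to a BESL index of 0-15; xhci_calculate_hird_besl()
| |  * below picks the smallest index whose latency covers the host's U2
| |  * exit latency.
| |  */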
| 3628 | |
| 3629 | /* Calculate HIRD/BESL for USB2 PORTPMSC */
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 3630 | static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, |
| 3631 | struct usb_device *udev) |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 3632 | { |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 3633 | int u2del, besl, besl_host; |
| 3634 | int besl_device = 0; |
| 3635 | u32 field; |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 3636 | |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 3637 | u2del = HCS_U2_LATENCY(xhci->hcs_params3); |
| 3638 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
| 3639 | |
| 3640 | if (field & USB_BESL_SUPPORT) { |
| 3641 | for (besl_host = 0; besl_host < 16; besl_host++) { |
| 3642 | if (xhci_besl_encoding[besl_host] >= u2del) |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 3643 | break; |
| 3644 | } |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 3645 | /* Use baseline BESL value as default */ |
| 3646 | if (field & USB_BESL_BASELINE_VALID) |
| 3647 | besl_device = USB_GET_BESL_BASELINE(field); |
| 3648 | else if (field & USB_BESL_DEEP_VALID) |
| 3649 | besl_device = USB_GET_BESL_DEEP(field); |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 3650 | } else { |
| 3651 | if (u2del <= 50) |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 3652 | besl_host = 0; |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 3653 | else |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 3654 | besl_host = (u2del - 51) / 75 + 1; |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 3655 | } |
| 3656 | |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 3657 | besl = besl_host + besl_device; |
| 3658 | if (besl > 15) |
| 3659 | besl = 15; |
| 3660 | |
| 3661 | return besl; |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 3662 | } |
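| | /*
| |  * Worked examples (illustrative values): with u2del = 400 and no BESL
| |  * support, besl_host = (400 - 51) / 75 + 1 = 5. With BESL support and
| |  * the same u2del, the first xhci_besl_encoding[] entry >= 400 is index
| |  * 4, so besl_host = 4; any device-reported BESL is then added and the
| |  * sum is clamped to 15.
| |  */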
| 3663 | |
| 3664 | static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd, |
| 3665 | struct usb_device *udev) |
| 3666 | { |
| 3667 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 3668 | struct dev_info *dev_info; |
| 3669 | __le32 __iomem **port_array; |
| 3670 | __le32 __iomem *addr, *pm_addr; |
| 3671 | u32 temp, dev_id; |
| 3672 | unsigned int port_num; |
| 3673 | unsigned long flags; |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 3674 | int hird; |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 3675 | int ret; |
| 3676 | |
| 3677 | if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support || |
| 3678 | !udev->lpm_capable) |
| 3679 | return -EINVAL; |
| 3680 | |
| 3681 | /* So far we only support LPM for non-hub devices connected to the root hub */
| 3682 | if (!udev->parent || udev->parent->parent || |
| 3683 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) |
| 3684 | return -EINVAL; |
| 3685 | |
| 3686 | spin_lock_irqsave(&xhci->lock, flags); |
| 3687 | |
| 3688 | /* Look for devices in lpm_failed_devs list */ |
| 3689 | dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 | |
| 3690 | le16_to_cpu(udev->descriptor.idProduct); |
| 3691 | list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) { |
| 3692 | if (dev_info->dev_id == dev_id) { |
| 3693 | ret = -EINVAL; |
| 3694 | goto finish; |
| 3695 | } |
| 3696 | } |
| 3697 | |
| 3698 | port_array = xhci->usb2_ports; |
| 3699 | port_num = udev->portnum - 1; |
| 3700 | |
| 3701 | if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) { |
| 3702 | xhci_dbg(xhci, "invalid port number %d\n", udev->portnum); |
| 3703 | ret = -EINVAL; |
| 3704 | goto finish; |
| 3705 | } |
| 3706 | |
| 3707 | /* |
| 3708 | * Test USB 2.0 software LPM. |
| 3709 | * FIXME: some xHCI 1.0 hosts may implement a new register to set up |
| 3710 | * hardware-controlled USB 2.0 LPM. See sections 5.4.11 and 4.23.5.1.1.1
| 3711 | * in the June 2011 errata release. |
| 3712 | */ |
| 3713 | xhci_dbg(xhci, "test port %d software LPM\n", port_num); |
| 3714 | /* |
| 3715 | * Set L1 Device Slot and HIRD/BESL. |
| 3716 | * Check device's USB 2.0 extension descriptor to determine whether |
| 3717 | * HIRD or BESL should be used. See the USB 2.0 LPM errata.
| 3718 | */ |
| 3719 | pm_addr = port_array[port_num] + 1; |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 3720 | hird = xhci_calculate_hird_besl(xhci, udev); |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 3721 | temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird); |
| 3722 | xhci_writel(xhci, temp, pm_addr); |
| 3723 | |
| 3724 | /* Set port link state to U2(L1) */ |
| 3725 | addr = port_array[port_num]; |
| 3726 | xhci_set_link_state(xhci, port_array, port_num, XDEV_U2); |
| 3727 | |
| 3728 | /* wait for ACK */ |
| 3729 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3730 | msleep(10); |
| 3731 | spin_lock_irqsave(&xhci->lock, flags); |
| 3732 | |
| 3733 | /* Check L1 Status */ |
| 3734 | ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125); |
| 3735 | if (ret != -ETIMEDOUT) { |
| 3736 | /* entered L1 successfully */
| 3737 | temp = xhci_readl(xhci, addr); |
| 3738 | xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n", |
| 3739 | port_num, temp); |
| 3740 | ret = 0; |
| 3741 | } else { |
| 3742 | temp = xhci_readl(xhci, pm_addr); |
| 3743 | xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n", |
| 3744 | port_num, temp & PORT_L1S_MASK); |
| 3745 | ret = -EINVAL; |
| 3746 | } |
| 3747 | |
| 3748 | /* Resume the port */ |
| 3749 | xhci_set_link_state(xhci, port_array, port_num, XDEV_U0); |
| 3750 | |
| 3751 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3752 | msleep(10); |
| 3753 | spin_lock_irqsave(&xhci->lock, flags); |
| 3754 | |
| 3755 | /* Clear PLC */ |
| 3756 | xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC); |
| 3757 | |
| 3758 | /* Check PORTSC to make sure the device is in the right state */ |
| 3759 | if (!ret) { |
| 3760 | temp = xhci_readl(xhci, addr); |
| 3761 | xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp); |
| 3762 | if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) || |
| 3763 | (temp & PORT_PLS_MASK) != XDEV_U0) { |
| 3764 | xhci_dbg(xhci, "port L1 resume fail\n"); |
| 3765 | ret = -EINVAL; |
| 3766 | } |
| 3767 | } |
| 3768 | |
| 3769 | if (ret) { |
| 3770 | /* Insert dev to lpm_failed_devs list */ |
| 3771 | xhci_warn(xhci, "device LPM test failed, may disconnect and " |
| 3772 | "re-enumerate\n"); |
| 3773 | dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC); |
| 3774 | if (!dev_info) { |
| 3775 | ret = -ENOMEM; |
| 3776 | goto finish; |
| 3777 | } |
| 3778 | dev_info->dev_id = dev_id; |
| 3779 | INIT_LIST_HEAD(&dev_info->list); |
| 3780 | list_add(&dev_info->list, &xhci->lpm_failed_devs); |
| 3781 | } else { |
| 3782 | xhci_ring_device(xhci, udev->slot_id); |
| 3783 | } |
| 3784 | |
| 3785 | finish: |
| 3786 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3787 | return ret; |
| 3788 | } |
| 3789 | |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 3790 | int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
| 3791 | struct usb_device *udev, int enable) |
| 3792 | { |
| 3793 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 3794 | __le32 __iomem **port_array; |
| 3795 | __le32 __iomem *pm_addr; |
| 3796 | u32 temp; |
| 3797 | unsigned int port_num; |
| 3798 | unsigned long flags; |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 3799 | int hird; |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 3800 | |
| 3801 | if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support || |
| 3802 | !udev->lpm_capable) |
| 3803 | return -EPERM; |
| 3804 | |
| 3805 | if (!udev->parent || udev->parent->parent || |
| 3806 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) |
| 3807 | return -EPERM; |
| 3808 | |
| 3809 | if (udev->usb2_hw_lpm_capable != 1) |
| 3810 | return -EPERM; |
| 3811 | |
| 3812 | spin_lock_irqsave(&xhci->lock, flags); |
| 3813 | |
| 3814 | port_array = xhci->usb2_ports; |
| 3815 | port_num = udev->portnum - 1; |
| 3816 | pm_addr = port_array[port_num] + 1; |
| 3817 | temp = xhci_readl(xhci, pm_addr); |
| 3818 | |
| 3819 | xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", |
| 3820 | enable ? "enable" : "disable", port_num); |
| 3821 | |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 3822 | hird = xhci_calculate_hird_besl(xhci, udev); |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 3823 | |
| 3824 | if (enable) { |
| 3825 | temp &= ~PORT_HIRD_MASK; |
| 3826 | temp |= PORT_HIRD(hird) | PORT_RWE; |
| 3827 | xhci_writel(xhci, temp, pm_addr); |
| 3828 | temp = xhci_readl(xhci, pm_addr); |
| 3829 | temp |= PORT_HLE; |
| 3830 | xhci_writel(xhci, temp, pm_addr); |
| 3831 | } else { |
| 3832 | temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK); |
| 3833 | xhci_writel(xhci, temp, pm_addr); |
| 3834 | } |
| 3835 | |
| 3836 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3837 | return 0; |
| 3838 | } |
| 3839 | |
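| | /*
| |  * Note that LPM setup is best-effort: if the software LPM test or the
| |  * hardware LPM enable fails, the device simply runs with LPM disabled
| |  * and xhci_update_device() still reports success.
| |  */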
Sarah Sharp | b01bcbf | 2012-05-21 07:54:42 -0700 | [diff] [blame] | 3840 | int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
| 3841 | { |
| 3842 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 3843 | int ret; |
| 3844 | |
| 3845 | ret = xhci_usb2_software_lpm_test(hcd, udev); |
| 3846 | if (!ret) { |
| 3847 | xhci_dbg(xhci, "software LPM test succeed\n"); |
| 3848 | if (xhci->hw_lpm_support == 1) { |
| 3849 | udev->usb2_hw_lpm_capable = 1; |
| 3850 | ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1); |
| 3851 | if (!ret) |
| 3852 | udev->usb2_hw_lpm_enabled = 1; |
| 3853 | } |
| 3854 | } |
| 3855 | |
| 3856 | return 0; |
| 3857 | } |
| 3858 | |
| 3859 | #else |
| 3860 | |
| 3861 | int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
| 3862 | struct usb_device *udev, int enable) |
| 3863 | { |
| 3864 | return 0; |
| 3865 | } |
| 3866 | |
| 3867 | int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
| 3868 | { |
| 3869 | return 0; |
| 3870 | } |
| 3871 | |
| 3872 | #endif /* CONFIG_USB_SUSPEND */ |
| 3873 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 3874 | /*---------------------- USB 3.0 Link PM functions ------------------------*/ |
| 3875 | |
Sarah Sharp | b01bcbf | 2012-05-21 07:54:42 -0700 | [diff] [blame] | 3876 | #ifdef CONFIG_PM |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 3877 | /* Service interval in nanoseconds = 2^(bInterval - 1) * 125 us * 1000 ns/us */
| 3878 | static unsigned long long xhci_service_interval_to_ns( |
| 3879 | struct usb_endpoint_descriptor *desc) |
| 3880 | { |
| 3881 | return (1 << (desc->bInterval - 1)) * 125 * 1000; |
| 3882 | } |
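| | /*
| |  * Worked example (illustrative): bInterval = 4 gives
| |  * (1 << 3) * 125 * 1000 = 1,000,000 ns, i.e. a 1 ms service interval.
| |  */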
| 3883 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 3884 | static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, |
| 3885 | enum usb3_link_state state) |
| 3886 | { |
| 3887 | unsigned long long sel; |
| 3888 | unsigned long long pel; |
| 3889 | unsigned int max_sel_pel; |
| 3890 | char *state_name; |
| 3891 | |
| 3892 | switch (state) { |
| 3893 | case USB3_LPM_U1: |
| 3894 | /* Convert SEL and PEL stored in nanoseconds to microseconds */ |
| 3895 | sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); |
| 3896 | pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); |
| 3897 | max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; |
| 3898 | state_name = "U1"; |
| 3899 | break; |
| 3900 | case USB3_LPM_U2: |
| 3901 | sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); |
| 3902 | pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); |
| 3903 | max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; |
| 3904 | state_name = "U2"; |
| 3905 | break; |
| 3906 | default: |
| 3907 | dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", |
| 3908 | __func__); |
Sarah Sharp | e25e62a | 2012-06-07 11:10:32 -0700 | [diff] [blame] | 3909 | return USB3_LPM_DISABLED; |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 3910 | } |
| 3911 | |
| 3912 | if (sel <= max_sel_pel && pel <= max_sel_pel) |
| 3913 | return USB3_LPM_DEVICE_INITIATED; |
| 3914 | |
| 3915 | if (sel > max_sel_pel) |
| 3916 | dev_dbg(&udev->dev, "Device-initiated %s disabled " |
| 3917 | "due to long SEL %llu ms\n", |
| 3918 | state_name, sel); |
| 3919 | else |
| 3920 | dev_dbg(&udev->dev, "Device-initiated %s disabled " |
| 3921 | "due to long PEL %llu\n ms", |
| 3922 | state_name, pel); |
| 3923 | return USB3_LPM_DISABLED; |
| 3924 | } |
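| | /*
| |  * Worked example (illustrative values): u1_params.sel = 2100 ns and
| |  * u1_params.pel = 900 ns round up to sel = 3 us and pel = 1 us; if both
| |  * are within USB3_LPM_MAX_U1_SEL_PEL, U1 stays available for
| |  * device-initiated requests only (USB3_LPM_DEVICE_INITIATED).
| |  */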
| 3925 | |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 3926 | /* Returns the hub-encoded U1 timeout value. |
| 3927 | * The U1 timeout should be the maximum of the following values: |
| 3928 | * - For control endpoints, U1 system exit latency (SEL) * 3 |
| 3929 | * - For bulk endpoints, U1 SEL * 5 |
| 3930 | * - For interrupt endpoints: |
| 3931 | * - Notification EPs, U1 SEL * 3 |
| 3932 | * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) |
| 3933 | * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) |
| 3934 | */ |
| 3935 | static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev, |
| 3936 | struct usb_endpoint_descriptor *desc) |
| 3937 | { |
| 3938 | unsigned long long timeout_ns; |
| 3939 | int ep_type; |
| 3940 | int intr_type; |
| 3941 | |
| 3942 | ep_type = usb_endpoint_type(desc); |
| 3943 | switch (ep_type) { |
| 3944 | case USB_ENDPOINT_XFER_CONTROL: |
| 3945 | timeout_ns = udev->u1_params.sel * 3; |
| 3946 | break; |
| 3947 | case USB_ENDPOINT_XFER_BULK: |
| 3948 | timeout_ns = udev->u1_params.sel * 5; |
| 3949 | break; |
| 3950 | case USB_ENDPOINT_XFER_INT: |
| 3951 | intr_type = usb_endpoint_interrupt_type(desc); |
| 3952 | if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { |
| 3953 | timeout_ns = udev->u1_params.sel * 3; |
| 3954 | break; |
| 3955 | } |
| 3956 | /* Otherwise the calculation is the same as isoc eps */ |
| 3957 | case USB_ENDPOINT_XFER_ISOC: |
| 3958 | timeout_ns = xhci_service_interval_to_ns(desc); |
Sarah Sharp | c88db16 | 2012-05-21 08:44:33 -0700 | [diff] [blame] | 3959 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 3960 | if (timeout_ns < udev->u1_params.sel * 2) |
| 3961 | timeout_ns = udev->u1_params.sel * 2; |
| 3962 | break; |
| 3963 | default: |
| 3964 | return 0; |
| 3965 | } |
| 3966 | |
| 3967 | /* The U1 timeout is encoded in 1us intervals. */ |
Sarah Sharp | c88db16 | 2012-05-21 08:44:33 -0700 | [diff] [blame] | 3968 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 3969 | /* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */ |
| 3970 | if (timeout_ns == USB3_LPM_DISABLED) |
| 3971 | timeout_ns++; |
| 3972 | |
| 3973 | /* If the necessary timeout value is bigger than what we can set in the |
| 3974 | * USB 3.0 hub, we have to disable hub-initiated U1. |
| 3975 | */ |
| 3976 | if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) |
| 3977 | return timeout_ns; |
| 3978 | dev_dbg(&udev->dev, "Hub-initiated U1 disabled " |
| 3979 | "due to long timeout %llu ms\n", timeout_ns); |
| 3980 | return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); |
| 3981 | } |
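| | /*
| |  * Worked example (illustrative): a bulk endpoint on a device with
| |  * u1_params.sel = 2000 ns needs timeout_ns = 2000 * 5 = 10000 ns, which
| |  * encodes to DIV_ROUND_UP_ULL(10000, 1000) = 10 in 1 us units -- well
| |  * under USB3_LPM_U1_MAX_TIMEOUT, so 10 is returned.
| |  */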
| 3982 | |
| 3983 | /* Returns the hub-encoded U2 timeout value. |
| 3984 | * The U2 timeout should be the maximum of: |
| 3985 | * - 10 ms (to avoid the bandwidth impact on the scheduler) |
| 3986 | * - largest bInterval of any active periodic endpoint (to avoid going |
| 3987 | * into lower power link states between intervals). |
| 3988 | * - the U2 Exit Latency of the device |
| 3989 | */ |
| 3990 | static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev, |
| 3991 | struct usb_endpoint_descriptor *desc) |
| 3992 | { |
| 3993 | unsigned long long timeout_ns; |
| 3994 | unsigned long long u2_del_ns; |
| 3995 | |
| 3996 | timeout_ns = 10 * 1000 * 1000; |
| 3997 | |
| 3998 | if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) && |
| 3999 | (xhci_service_interval_to_ns(desc) > timeout_ns)) |
| 4000 | timeout_ns = xhci_service_interval_to_ns(desc); |
| 4001 | |
| 4002 | u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000;
| 4003 | if (u2_del_ns > timeout_ns) |
| 4004 | timeout_ns = u2_del_ns; |
| 4005 | |
| 4006 | /* The U2 timeout is encoded in 256us intervals */ |
Sarah Sharp | c88db16 | 2012-05-21 08:44:33 -0700 | [diff] [blame] | 4007 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4008 | /* If the necessary timeout value is bigger than what we can set in the |
| 4009 | * USB 3.0 hub, we have to disable hub-initiated U2. |
| 4010 | */ |
| 4011 | if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) |
| 4012 | return timeout_ns; |
| 4013 | dev_dbg(&udev->dev, "Hub-initiated U2 disabled " |
| 4014 | "due to long timeout %llu ms\n", timeout_ns); |
| 4015 | return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); |
| 4016 | } |
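| | /*
| |  * Worked example (illustrative): with no long-interval periodic
| |  * endpoints and bU2DevExitLat = 0, timeout_ns stays at the 10 ms floor,
| |  * which encodes to DIV_ROUND_UP_ULL(10000000, 256 * 1000) = 40 in
| |  * 256 us units.
| |  */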
| 4017 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4018 | static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, |
| 4019 | struct usb_device *udev, |
| 4020 | struct usb_endpoint_descriptor *desc, |
| 4021 | enum usb3_link_state state, |
| 4022 | u16 *timeout) |
| 4023 | { |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4024 | if (state == USB3_LPM_U1) { |
| 4025 | if (xhci->quirks & XHCI_INTEL_HOST) |
| 4026 | return xhci_calculate_intel_u1_timeout(udev, desc); |
| 4027 | } else { |
| 4028 | if (xhci->quirks & XHCI_INTEL_HOST) |
| 4029 | return xhci_calculate_intel_u2_timeout(udev, desc); |
| 4030 | } |
| 4031 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4032 | return USB3_LPM_DISABLED; |
| 4033 | } |
| 4034 | |
| 4035 | static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, |
| 4036 | struct usb_device *udev, |
| 4037 | struct usb_endpoint_descriptor *desc, |
| 4038 | enum usb3_link_state state, |
| 4039 | u16 *timeout) |
| 4040 | { |
| 4041 | u16 alt_timeout; |
| 4042 | |
| 4043 | alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, |
| 4044 | desc, state, timeout); |
| 4045 | |
| 4046 | /* If we found we can't enable hub-initiated LPM, or |
| 4047 | * the U1 or U2 exit latency was too high to allow |
| 4048 | * device-initiated LPM as well, just stop searching. |
| 4049 | */ |
| 4050 | if (alt_timeout == USB3_LPM_DISABLED || |
| 4051 | alt_timeout == USB3_LPM_DEVICE_INITIATED) { |
| 4052 | *timeout = alt_timeout; |
| 4053 | return -E2BIG; |
| 4054 | } |
| 4055 | if (alt_timeout > *timeout) |
| 4056 | *timeout = alt_timeout; |
| 4057 | return 0; |
| 4058 | } |
| 4059 | |
| 4060 | static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, |
| 4061 | struct usb_device *udev, |
| 4062 | struct usb_host_interface *alt, |
| 4063 | enum usb3_link_state state, |
| 4064 | u16 *timeout) |
| 4065 | { |
| 4066 | int j; |
| 4067 | |
| 4068 | for (j = 0; j < alt->desc.bNumEndpoints; j++) { |
| 4069 | if (xhci_update_timeout_for_endpoint(xhci, udev, |
| 4070 | &alt->endpoint[j].desc, state, timeout)) |
| 4071 | return -E2BIG; |
| 4073 | } |
| 4074 | return 0; |
| 4075 | } |
| 4076 | |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4077 | static int xhci_check_intel_tier_policy(struct usb_device *udev, |
| 4078 | enum usb3_link_state state) |
| 4079 | { |
| 4080 | struct usb_device *parent; |
| 4081 | unsigned int num_hubs; |
| 4082 | |
| 4083 | if (state == USB3_LPM_U2) |
| 4084 | return 0; |
| 4085 | |
| 4086 | /* Don't enable U1 if the device is on a 2nd tier hub or lower. */ |
| 4087 | for (parent = udev->parent, num_hubs = 0; parent->parent; |
| 4088 | parent = parent->parent) |
| 4089 | num_hubs++; |
| 4090 | |
| 4091 | if (num_hubs < 2) |
| 4092 | return 0; |
| 4093 | |
| 4094 | dev_dbg(&udev->dev, "Disabling U1 link state for device" |
| 4095 | " below second-tier hub.\n"); |
| 4096 | dev_dbg(&udev->dev, "Plug device into first-tier hub " |
| 4097 | "to decrease power consumption.\n"); |
| 4098 | return -E2BIG; |
| 4099 | } |
| 4100 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4101 | static int xhci_check_tier_policy(struct xhci_hcd *xhci, |
| 4102 | struct usb_device *udev, |
| 4103 | enum usb3_link_state state) |
| 4104 | { |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4105 | if (xhci->quirks & XHCI_INTEL_HOST) |
| 4106 | return xhci_check_intel_tier_policy(udev, state); |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4107 | return -EINVAL; |
| 4108 | } |
| 4109 | |
| 4110 | /* Returns the U1 or U2 timeout that should be enabled. |
| 4111 | * If the tier check or timeout setting functions return with a non-zero exit |
| 4112 | * code, that means the timeout value has been finalized and we shouldn't look |
| 4113 | * at any more endpoints. |
| 4114 | */ |
| 4115 | static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, |
| 4116 | struct usb_device *udev, enum usb3_link_state state) |
| 4117 | { |
| 4118 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 4119 | struct usb_host_config *config; |
| 4120 | char *state_name; |
| 4121 | int i; |
| 4122 | u16 timeout = USB3_LPM_DISABLED; |
| 4123 | |
| 4124 | if (state == USB3_LPM_U1) |
| 4125 | state_name = "U1"; |
| 4126 | else if (state == USB3_LPM_U2) |
| 4127 | state_name = "U2"; |
| 4128 | else { |
| 4129 | dev_warn(&udev->dev, "Can't enable unknown link state %i\n", |
| 4130 | state); |
| 4131 | return timeout; |
| 4132 | } |
| 4133 | |
| 4134 | if (xhci_check_tier_policy(xhci, udev, state) < 0) |
| 4135 | return timeout; |
| 4136 | |
| 4137 | /* Gather some information about the currently installed configuration |
| 4138 | * and alternate interface settings. |
| 4139 | */ |
| 4140 | if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, |
| 4141 | state, &timeout)) |
| 4142 | return timeout; |
| 4143 | |
| 4144 | config = udev->actconfig; |
| 4145 | if (!config) |
| 4146 | return timeout; |
| 4147 | |
| 4148 | for (i = 0; i < USB_MAXINTERFACES; i++) { |
| 4149 | struct usb_driver *driver; |
| 4150 | struct usb_interface *intf = config->interface[i]; |
| 4151 | |
| 4152 | if (!intf) |
| 4153 | continue; |
| 4154 | |
| 4155 | /* Check if any currently bound drivers want hub-initiated LPM |
| 4156 | * disabled. |
| 4157 | */ |
| 4158 | if (intf->dev.driver) { |
| 4159 | driver = to_usb_driver(intf->dev.driver); |
| 4160 | if (driver && driver->disable_hub_initiated_lpm) { |
| 4161 | dev_dbg(&udev->dev, "Hub-initiated %s disabled " |
| 4162 | "at request of driver %s\n", |
| 4163 | state_name, driver->name); |
| 4164 | return xhci_get_timeout_no_hub_lpm(udev, state); |
| 4165 | } |
| 4166 | } |
| 4167 | |
| 4168 | /* Not sure how this could happen... */ |
| 4169 | if (!intf->cur_altsetting) |
| 4170 | continue; |
| 4171 | |
| 4172 | if (xhci_update_timeout_for_interface(xhci, udev, |
| 4173 | intf->cur_altsetting, |
| 4174 | state, &timeout)) |
| 4175 | return timeout; |
| 4176 | } |
| 4177 | return timeout; |
| 4178 | } |
| 4179 | |
| 4180 | /* |
| 4181 | * Issue an Evaluate Context command to change the Maximum Exit Latency in the |
| 4182 | * slot context. If that succeeds, store the new MEL in the xhci_virt_device. |
| 4183 | */ |
| 4184 | static int xhci_change_max_exit_latency(struct xhci_hcd *xhci, |
| 4185 | struct usb_device *udev, u16 max_exit_latency) |
| 4186 | { |
| 4187 | struct xhci_virt_device *virt_dev; |
| 4188 | struct xhci_command *command; |
| 4189 | struct xhci_input_control_ctx *ctrl_ctx; |
| 4190 | struct xhci_slot_ctx *slot_ctx; |
| 4191 | unsigned long flags; |
| 4192 | int ret; |
| 4193 | |
| 4194 | spin_lock_irqsave(&xhci->lock, flags); |
| 4195 | if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) { |
| 4196 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4197 | return 0; |
| 4198 | } |
| 4199 | |
| 4200 | /* Attempt to issue an Evaluate Context command to change the MEL. */ |
| 4201 | virt_dev = xhci->devs[udev->slot_id]; |
| 4202 | command = xhci->lpm_command; |
| 4203 | xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); |
| 4204 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4205 | |
| 4206 | ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx); |
| 4207 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
| 4208 | slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); |
| 4209 | slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); |
| 4210 | slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); |
| 4211 | |
| 4212 | xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n"); |
| 4213 | xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id); |
| 4214 | xhci_dbg_ctx(xhci, command->in_ctx, 0); |
| 4215 | |
| 4216 | /* Issue and wait for the evaluate context command. */ |
| 4217 | ret = xhci_configure_endpoint(xhci, udev, command, |
| 4218 | true, true); |
| 4219 | xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id); |
| 4220 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0); |
| 4221 | |
| 4222 | if (!ret) { |
| 4223 | spin_lock_irqsave(&xhci->lock, flags); |
| 4224 | virt_dev->current_mel = max_exit_latency; |
| 4225 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4226 | } |
| 4227 | return ret; |
| 4228 | } |
| 4229 | |
| 4230 | static int calculate_max_exit_latency(struct usb_device *udev, |
| 4231 | enum usb3_link_state state_changed, |
| 4232 | u16 hub_encoded_timeout) |
| 4233 | { |
| 4234 | unsigned long long u1_mel_us = 0; |
| 4235 | unsigned long long u2_mel_us = 0; |
| 4236 | unsigned long long mel_us = 0; |
| 4237 | bool disabling_u1; |
| 4238 | bool disabling_u2; |
| 4239 | bool enabling_u1; |
| 4240 | bool enabling_u2; |
| 4241 | |
| 4242 | disabling_u1 = (state_changed == USB3_LPM_U1 && |
| 4243 | hub_encoded_timeout == USB3_LPM_DISABLED); |
| 4244 | disabling_u2 = (state_changed == USB3_LPM_U2 && |
| 4245 | hub_encoded_timeout == USB3_LPM_DISABLED); |
| 4246 | |
| 4247 | enabling_u1 = (state_changed == USB3_LPM_U1 && |
| 4248 | hub_encoded_timeout != USB3_LPM_DISABLED); |
| 4249 | enabling_u2 = (state_changed == USB3_LPM_U2 && |
| 4250 | hub_encoded_timeout != USB3_LPM_DISABLED); |
| 4251 | |
| 4252 | /* If U1 was already enabled and we're not disabling it, |
| 4253 | * or we're going to enable U1, account for the U1 max exit latency. |
| 4254 | */ |
| 4255 | if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || |
| 4256 | enabling_u1) |
| 4257 | u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); |
| 4258 | if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || |
| 4259 | enabling_u2) |
| 4260 | u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); |
| 4261 | |
| 4262 | if (u1_mel_us > u2_mel_us) |
| 4263 | mel_us = u1_mel_us; |
| 4264 | else |
| 4265 | mel_us = u2_mel_us; |
| 4266 | /* xHCI host controller max exit latency field is only 16 bits wide. */ |
| 4267 | if (mel_us > MAX_EXIT) { |
| 4268 | dev_warn(&udev->dev, "Link PM max exit latency of %lluus " |
| 4269 | "is too big.\n", mel_us); |
| 4270 | return -E2BIG; |
| 4271 | } |
| 4272 | return mel_us; |
| 4273 | } |
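| | /*
| |  * Worked example (illustrative): enabling U1 (hub_encoded_timeout !=
| |  * USB3_LPM_DISABLED) on a device with u1_params.mel = 2500 ns while U2
| |  * stays disabled gives u1_mel_us = DIV_ROUND_UP(2500, 1000) = 3, so the
| |  * function returns a max exit latency of 3 us.
| |  */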
| 4274 | |
| 4275 | /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ |
| 4276 | int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, |
| 4277 | struct usb_device *udev, enum usb3_link_state state) |
| 4278 | { |
| 4279 | struct xhci_hcd *xhci; |
| 4280 | u16 hub_encoded_timeout; |
| 4281 | int mel; |
| 4282 | int ret; |
| 4283 | |
| 4284 | xhci = hcd_to_xhci(hcd); |
| 4285 | /* The LPM timeout values are pretty host-controller specific, so don't |
| 4286 | * enable hub-initiated timeouts unless the vendor has provided |
| 4287 | * information about their timeout algorithm. |
| 4288 | */ |
| 4289 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || |
| 4290 | !xhci->devs[udev->slot_id]) |
| 4291 | return USB3_LPM_DISABLED; |
| 4292 | |
| 4293 | hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); |
| 4294 | mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); |
| 4295 | if (mel < 0) { |
| 4296 | /* Max Exit Latency is too big, disable LPM. */ |
| 4297 | hub_encoded_timeout = USB3_LPM_DISABLED; |
| 4298 | mel = 0; |
| 4299 | } |
| 4300 | |
| 4301 | ret = xhci_change_max_exit_latency(xhci, udev, mel); |
| 4302 | if (ret) |
| 4303 | return ret; |
| 4304 | return hub_encoded_timeout; |
| 4305 | } |
| 4306 | |
| 4307 | int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, |
| 4308 | struct usb_device *udev, enum usb3_link_state state) |
| 4309 | { |
| 4310 | struct xhci_hcd *xhci; |
| 4311 | u16 mel; |
| 4312 | int ret; |
| 4313 | |
| 4314 | xhci = hcd_to_xhci(hcd); |
| 4315 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || |
| 4316 | !xhci->devs[udev->slot_id]) |
| 4317 | return 0; |
| 4318 | |
| 4319 | mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); |
| 4320 | ret = xhci_change_max_exit_latency(xhci, udev, mel); |
| 4321 | if (ret) |
| 4322 | return ret; |
| 4323 | return 0; |
| 4324 | } |
Sarah Sharp | b01bcbf | 2012-05-21 07:54:42 -0700 | [diff] [blame] | 4325 | #else /* CONFIG_PM */ |
| 4326 | |
| 4327 | int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, |
| 4328 | struct usb_device *udev, enum usb3_link_state state) |
| 4329 | { |
| 4330 | return USB3_LPM_DISABLED; |
| 4331 | } |
| 4332 | |
| 4333 | int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, |
| 4334 | struct usb_device *udev, enum usb3_link_state state) |
| 4335 | { |
| 4336 | return 0; |
| 4337 | } |
| 4338 | #endif /* CONFIG_PM */ |
| 4339 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4340 | /*-------------------------------------------------------------------------*/ |
| 4341 | |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4342 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's |
| 4343 | * internal data structures for the device. |
| 4344 | */ |
| 4345 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, |
| 4346 | struct usb_tt *tt, gfp_t mem_flags) |
| 4347 | { |
| 4348 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 4349 | struct xhci_virt_device *vdev; |
| 4350 | struct xhci_command *config_cmd; |
| 4351 | struct xhci_input_control_ctx *ctrl_ctx; |
| 4352 | struct xhci_slot_ctx *slot_ctx; |
| 4353 | unsigned long flags; |
| 4354 | unsigned think_time; |
| 4355 | int ret; |
| 4356 | |
| 4357 | /* Ignore root hubs */ |
| 4358 | if (!hdev->parent) |
| 4359 | return 0; |
| 4360 | |
| 4361 | vdev = xhci->devs[hdev->slot_id]; |
| 4362 | if (!vdev) { |
| 4363 | xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); |
| 4364 | return -EINVAL; |
| 4365 | } |
Sarah Sharp | a1d78c1 | 2009-12-09 15:59:03 -0800 | [diff] [blame] | 4366 | config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4367 | if (!config_cmd) { |
| 4368 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); |
| 4369 | return -ENOMEM; |
| 4370 | } |
| 4371 | |
| 4372 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 839c817 | 2011-09-02 11:05:47 -0700 | [diff] [blame] | 4373 | if (hdev->speed == USB_SPEED_HIGH && |
| 4374 | xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { |
| 4375 | xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); |
| 4376 | xhci_free_command(xhci, config_cmd); |
| 4377 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4378 | return -ENOMEM; |
| 4379 | } |
| 4380 | |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4381 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); |
| 4382 | ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 4383 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4384 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 4385 | slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4386 | if (tt->multi) |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 4387 | slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4388 | if (xhci->hci_version > 0x95) { |
| 4389 | xhci_dbg(xhci, "xHCI version %x needs hub " |
| 4390 | "TT think time and number of ports\n", |
| 4391 | (unsigned int) xhci->hci_version); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 4392 | slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4393 | /* Set TT think time - convert from ns to FS bit times. |
| 4394 | * 0 = 8 FS bit times, 1 = 16 FS bit times, |
| 4395 | * 2 = 24 FS bit times, 3 = 32 FS bit times. |
Andiry Xu | 700b417 | 2011-05-05 18:14:05 +0800 | [diff] [blame] | 4396 | * |
| 4397 | * xHCI 1.0: this field shall be 0 if the device is not a |
| 4398 | * High-speed hub. |
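| | * |
| | * For example, usbcore stores 16 FS bit times as tt->think_time = |
| | * 666 * 2 = 1332 ns, which (1332 / 666) - 1 maps back to encoding 1. |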
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4399 | */ |
| 4400 | think_time = tt->think_time; |
| 4401 | if (think_time != 0) |
| 4402 | think_time = (think_time / 666) - 1; |
Andiry Xu | 700b417 | 2011-05-05 18:14:05 +0800 | [diff] [blame] | 4403 | if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) |
| 4404 | slot_ctx->tt_info |= |
| 4405 | cpu_to_le32(TT_THINK_TIME(think_time)); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4406 | } else { |
| 4407 | xhci_dbg(xhci, "xHCI version %x doesn't need hub " |
| 4408 | "TT think time or number of ports\n", |
| 4409 | (unsigned int) xhci->hci_version); |
| 4410 | } |
| 4411 | slot_ctx->dev_state = 0; |
| 4412 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4413 | |
| 4414 | xhci_dbg(xhci, "Set up %s for hub device.\n", |
| 4415 | (xhci->hci_version > 0x95) ? |
| 4416 | "configure endpoint" : "evaluate context"); |
| 4417 | xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); |
| 4418 | xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); |
| 4419 | |
| 4420 | /* Issue and wait for the configure endpoint or |
| 4421 | * evaluate context command. |
| 4422 | */ |
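| | /* As the debug messages above note, 0.95 controllers ignore the TT |
| | * and port-count fields, so the lighter-weight evaluate context |
| | * command suffices for them; later controllers need a full configure |
| | * endpoint command to pick those fields up. |
| | */ |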
| 4423 | if (xhci->hci_version > 0x95) |
| 4424 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, |
| 4425 | false, false); |
| 4426 | else |
| 4427 | ret = xhci_configure_endpoint(xhci, hdev, config_cmd, |
| 4428 | true, false); |
| 4429 | |
| 4430 | xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); |
| 4431 | xhci_dbg_ctx(xhci, vdev->out_ctx, 0); |
| 4432 | |
| 4433 | xhci_free_command(xhci, config_cmd); |
| 4434 | return ret; |
| 4435 | } |
| 4436 | |
Sarah Sharp | 66d4ead | 2009-04-27 19:52:28 -0700 | [diff] [blame] | 4437 | int xhci_get_frame(struct usb_hcd *hcd) |
| 4438 | { |
| 4439 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 4440 | /* EHCI mods by the periodic size because its frame list wraps there; |
| | * xHCI has no frame list. MFINDEX counts 125 us microframes, so |
| | * shifting right by 3 yields the 1 ms frame number. */ |
| 4441 | return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3; |
| 4442 | } |
| 4443 | |
Sebastian Andrzej Siewior | 552e0c4 | 2011-09-23 14:20:01 -0700 | [diff] [blame] | 4444 | int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) |
| 4445 | { |
| 4446 | struct xhci_hcd *xhci; |
| 4447 | struct device *dev = hcd->self.controller; |
| 4448 | int retval; |
| 4449 | u32 temp; |
| 4450 | |
Andiry Xu | fdaf8b3 | 2012-03-05 17:49:38 +0800 | [diff] [blame] | 4451 | /* Accept arbitrarily long scatter-gather lists */ |
| 4452 | hcd->self.sg_tablesize = ~0; |
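| | /* (the xHC chains one TRB per scatter-gather entry itself, so the |
| | * core need not cap the table size) |
| | */ |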
Sebastian Andrzej Siewior | 552e0c4 | 2011-09-23 14:20:01 -0700 | [diff] [blame] | 4453 | |
| 4454 | if (usb_hcd_is_primary_hcd(hcd)) { |
| 4455 | xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL); |
| 4456 | if (!xhci) |
| 4457 | return -ENOMEM; |
| 4458 | *((struct xhci_hcd **) hcd->hcd_priv) = xhci; |
| 4459 | xhci->main_hcd = hcd; |
| 4460 | /* Mark the first roothub as being USB 2.0. |
| 4461 | * The xHCI driver will register the USB 3.0 roothub. |
| 4462 | */ |
| 4463 | hcd->speed = HCD_USB2; |
| 4464 | hcd->self.root_hub->speed = USB_SPEED_HIGH; |
| 4465 | /* |
| 4466 | * USB 2.0 roothub under xHCI has an integrated TT |
| 4467 | * (rate matching hub) as opposed to having an OHCI/UHCI |
| 4468 | * companion controller. |
| 4469 | */ |
| 4470 | hcd->has_tt = 1; |
| 4471 | } else { |
| 4472 | /* xHCI private pointer was set in xhci_pci_probe for the second |
| 4473 | * registered roothub. |
| 4474 | */ |
| 4475 | xhci = hcd_to_xhci(hcd); |
| 4476 | temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); |
| 4477 | if (HCC_64BIT_ADDR(temp)) { |
| 4478 | xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); |
| 4479 | dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); |
| 4480 | } else { |
| 4481 | dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); |
| 4482 | } |
| 4483 | return 0; |
| 4484 | } |
| 4485 | |
| 4486 | xhci->cap_regs = hcd->regs; |
| 4487 | xhci->op_regs = hcd->regs + |
| 4488 | HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase)); |
| 4489 | xhci->run_regs = hcd->regs + |
| 4490 | (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK); |
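| | /* xHCI register layout: capability registers at the start of the BAR, |
| | * operational registers CAPLENGTH bytes in, runtime registers at the |
| | * offset given by RTSOFF. |
| | */ |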
| 4491 | /* Cache read-only capability registers */ |
| 4492 | xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1); |
| 4493 | xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2); |
| 4494 | xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3); |
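| | /* HC_VERSION lives in the upper half of hc_capbase, so read that |
| | * register first; hcc_params is overwritten just below with the |
| | * real HCC parameters register. |
| | */ |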
| 4495 | xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase); |
| 4496 | xhci->hci_version = HC_VERSION(xhci->hcc_params); |
| 4497 | xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params); |
| 4498 | xhci_print_registers(xhci); |
| 4499 | |
| 4500 | get_quirks(dev, xhci); |
| 4501 | |
| 4502 | /* Make sure the HC is halted. */ |
| 4503 | retval = xhci_halt(xhci); |
| 4504 | if (retval) |
| 4505 | goto error; |
| 4506 | |
| 4507 | xhci_dbg(xhci, "Resetting HCD\n"); |
| 4508 | /* Reset the internal HC memory state and registers. */ |
| 4509 | retval = xhci_reset(xhci); |
| 4510 | if (retval) |
| 4511 | goto error; |
| 4512 | xhci_dbg(xhci, "Reset complete\n"); |
| 4513 | |
| 4514 | temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); |
| 4515 | if (HCC_64BIT_ADDR(temp)) { |
| 4516 | xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); |
| 4517 | dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); |
| 4518 | } else { |
| 4519 | dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); |
| 4520 | } |
| 4521 | |
| 4522 | xhci_dbg(xhci, "Calling HCD init\n"); |
| 4523 | /* Initialize HCD and host controller data structures. */ |
| 4524 | retval = xhci_init(hcd); |
| 4525 | if (retval) |
| 4526 | goto error; |
| 4527 | xhci_dbg(xhci, "Called HCD init\n"); |
| 4528 | return 0; |
| 4529 | error: |
| 4530 | kfree(xhci); |
| 4531 | return retval; |
| 4532 | } |
| 4533 | |
Sarah Sharp | 66d4ead | 2009-04-27 19:52:28 -0700 | [diff] [blame] | 4534 | MODULE_DESCRIPTION(DRIVER_DESC); |
| 4535 | MODULE_AUTHOR(DRIVER_AUTHOR); |
| 4536 | MODULE_LICENSE("GPL"); |
| 4537 | |
| 4538 | static int __init xhci_hcd_init(void) |
| 4539 | { |
Sebastian Andrzej Siewior | 0cc47d5 | 2011-09-23 14:20:02 -0700 | [diff] [blame] | 4540 | int retval; |
Sarah Sharp | 66d4ead | 2009-04-27 19:52:28 -0700 | [diff] [blame] | 4541 | |
| 4542 | retval = xhci_register_pci(); |
Sarah Sharp | 66d4ead | 2009-04-27 19:52:28 -0700 | [diff] [blame] | 4543 | if (retval < 0) { |
| 4544 | printk(KERN_DEBUG "Problem registering PCI driver.\n"); |
| 4545 | return retval; |
| 4546 | } |
Sebastian Andrzej Siewior | 3429e91 | 2012-03-13 16:57:41 +0200 | [diff] [blame] | 4547 | retval = xhci_register_plat(); |
| 4548 | if (retval < 0) { |
| 4549 | printk(KERN_DEBUG "Problem registering platform driver.\n"); |
| 4550 | goto unreg_pci; |
| 4551 | } |
Sarah Sharp | 9844197 | 2009-05-14 11:44:18 -0700 | [diff] [blame] | 4552 | /* |
| 4553 | * Check the compiler-generated sizes of structures that must be laid |
| 4554 | * out in specific ways for hardware access. |
| 4555 | */ |
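| | /* Each expected size is written as <number of 32-bit words> * 32 / 8 |
| | * bytes; e.g. the doorbell array must be 256 32-bit registers (1 KB). |
| | */ |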
| 4556 | BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); |
| 4557 | BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); |
| 4558 | BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); |
| 4559 | /* xhci_device_control has eight fields, and also |
| 4560 | * embeds one xhci_slot_ctx and 31 xhci_ep_ctx |
| 4561 | */ |
Sarah Sharp | 9844197 | 2009-05-14 11:44:18 -0700 | [diff] [blame] | 4562 | BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); |
| 4563 | BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); |
| 4564 | BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); |
| 4565 | BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); |
| 4566 | BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); |
| 4567 | /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ |
| 4568 | BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); |
Sarah Sharp | 66d4ead | 2009-04-27 19:52:28 -0700 | [diff] [blame] | 4569 | return 0; |
Sebastian Andrzej Siewior | 3429e91 | 2012-03-13 16:57:41 +0200 | [diff] [blame] | 4570 | unreg_pci: |
| 4571 | xhci_unregister_pci(); |
| 4572 | return retval; |
Sarah Sharp | 66d4ead | 2009-04-27 19:52:28 -0700 | [diff] [blame] | 4573 | } |
| 4574 | module_init(xhci_hcd_init); |
| 4575 | |
| 4576 | static void __exit xhci_hcd_cleanup(void) |
| 4577 | { |
Sarah Sharp | 66d4ead | 2009-04-27 19:52:28 -0700 | [diff] [blame] | 4578 | xhci_unregister_pci(); |
Sebastian Andrzej Siewior | 3429e91 | 2012-03-13 16:57:41 +0200 | [diff] [blame] | 4579 | xhci_unregister_plat(); |
Sarah Sharp | 66d4ead | 2009-04-27 19:52:28 -0700 | [diff] [blame] | 4580 | } |
| 4581 | module_exit(xhci_hcd_cleanup); |