/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
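/*
 * Example (illustrative only, assuming the driver is built as the xhci_hcd
 * module): both parameters above can be set at load time, e.g.
 *
 *        modprobe xhci_hcd link_quirk=1 quirks=0x40
 *
 * where the quirks value is a placeholder bitmask of XHCI_* flags from
 * xhci.h.  Since link_quirk is marked S_IWUSR, it can also be changed at
 * runtime via /sys/module/xhci_hcd/parameters/link_quirk.
 */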

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" has passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
        u32 result;

        do {
                result = readl(ptr);
                if (result == ~(u32)0)          /* card removed */
                        return -ENODEV;
                result &= mask;
                if (result == done)
                        return 0;
                udelay(1);
                usec--;
        } while (usec > 0);
        return -ETIMEDOUT;
}
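/*
 * Note that xhci_handshake() busy-waits in 1 us steps, so it burns CPU for
 * up to "usec" microseconds plus register-read overhead.  That is deliberate:
 * callers such as xhci_suspend() invoke it under a spinlock with interrupts
 * disabled, so it must not sleep.
 */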

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
        u32 halted;
        u32 cmd;
        u32 mask;

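        /*
         * Always mask off the interrupt-enable bits; additionally clear the
         * run/stop bit, but only if the host has not already halted on its
         * own (if it has, there is no run state left to clear).
         */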
        mask = ~(XHCI_IRQS);
        halted = readl(&xhci->op_regs->status) & STS_HALT;
        if (!halted)
                mask &= ~CMD_RUN;

        cmd = readl(&xhci->op_regs->command);
        cmd &= mask;
        writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
        int ret;

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
        xhci_quiesce(xhci);

        ret = xhci_handshake(&xhci->op_regs->status,
                        STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
        if (!ret) {
                xhci->xhc_state |= XHCI_STATE_HALTED;
                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
        } else
                xhci_warn(xhci, "Host not halted after %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
        u32 temp;
        int ret;

        temp = readl(&xhci->op_regs->command);
        temp |= (CMD_RUN);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
                        temp);
        writel(temp, &xhci->op_regs->command);

        /*
         * Wait for the HCHalted Status bit to be 0 to indicate the host is
         * running.
         */
        ret = xhci_handshake(&xhci->op_regs->status,
                        STS_HALT, 0, XHCI_MAX_HALT_USEC);
        if (ret == -ETIMEDOUT)
                xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
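        /*
         * A successful start clears XHCI_STATE_DYING as well as
         * XHCI_STATE_HALTED, so a host that was marked dying can be brought
         * back after a controller reset.
         */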
        if (!ret)
                xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);

        return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
        u32 command;
        u32 state;
        int ret, i;

        state = readl(&xhci->op_regs->status);
        if ((state & STS_HALT) == 0) {
                xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
                return 0;
        }

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
        command = readl(&xhci->op_regs->command);
        command |= CMD_RESET;
        writel(command, &xhci->op_regs->command);

        /* Existing Intel xHCI controllers require a delay of 1 ms
         * after setting the CMD_RESET bit, and before accessing any
         * HC registers.  This allows the HC to complete the reset
         * operation and be ready for HC register access.  Without
         * this delay, the subsequent HC register access may very
         * rarely result in a system hang.
         */
        if (xhci->quirks & XHCI_INTEL_HOST)
                udelay(1000);

        ret = xhci_handshake(&xhci->op_regs->command,
                        CMD_RESET, 0, 10 * 1000 * 1000);
        if (ret)
                return ret;

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Wait for controller to be ready for doorbell rings");
        /*
         * xHCI cannot write to any doorbells or operational registers other
         * than status until the "Controller Not Ready" flag is cleared.
         */
        ret = xhci_handshake(&xhci->op_regs->status,
                        STS_CNR, 0, 10 * 1000 * 1000);

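        /*
         * A controller reset wipes all port state, so drop any suspended or
         * resuming port bookkeeping for both the USB2 and USB3 roothubs.
         */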
        for (i = 0; i < 2; ++i) {
                xhci->bus_state[i].port_c_suspend = 0;
                xhci->bus_state[i].suspended_ports = 0;
                xhci->bus_state[i].resuming_ports = 0;
        }

        return ret;
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
        int i;

        if (!xhci->msix_entries)
                return -EINVAL;

        for (i = 0; i < xhci->msix_count; i++)
                if (xhci->msix_entries[i].vector)
                        free_irq(xhci->msix_entries[i].vector,
                                        xhci_to_hcd(xhci));
        return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
        int ret;
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

        ret = pci_enable_msi(pdev);
        if (ret) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                                "failed to allocate MSI entry");
                return ret;
        }

        ret = request_irq(pdev->irq, xhci_msi_irq,
                        0, "xhci_hcd", xhci_to_hcd(xhci));
        if (ret) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                                "disable MSI interrupt");
                pci_disable_msi(pdev);
        }

        return ret;
}

/*
 * Free IRQs
 * Free all IRQs that have been requested.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
        struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        int ret;

        /* return if using legacy interrupt */
        if (xhci_to_hcd(xhci)->irq > 0)
                return;

        ret = xhci_free_msi(xhci);
        if (!ret)
                return;
        if (pdev->irq > 0)
                free_irq(pdev->irq, xhci_to_hcd(xhci));

        return;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
        int i, ret = 0;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        /*
         * Calculate the number of MSI-X vectors supported.
         * - HCS_MAX_INTRS: the maximum number of interrupts the host can
         *   handle, based on the number of interrupters in HCSPARAMS1.
         * - num_online_cpus: one MSI-X vector per CPU core, plus one extra
         *   vector so an interrupt is always available.
         */
        xhci->msix_count = min(num_online_cpus() + 1,
                                HCS_MAX_INTRS(xhci->hcs_params1));

        xhci->msix_entries =
                kmalloc((sizeof(struct msix_entry)) * xhci->msix_count,
                        GFP_KERNEL);
        if (!xhci->msix_entries) {
                xhci_err(xhci, "Failed to allocate MSI-X entries\n");
                return -ENOMEM;
        }

        for (i = 0; i < xhci->msix_count; i++) {
                xhci->msix_entries[i].entry = i;
                xhci->msix_entries[i].vector = 0;
        }

        ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
        if (ret) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                                "Failed to enable MSI-X");
                goto free_entries;
        }

        for (i = 0; i < xhci->msix_count; i++) {
                ret = request_irq(xhci->msix_entries[i].vector,
                                xhci_msi_irq,
                                0, "xhci_hcd", xhci_to_hcd(xhci));
                if (ret)
                        goto disable_msix;
        }

        hcd->msix_enabled = 1;
        return ret;

disable_msix:
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
        xhci_free_irq(xhci);
        pci_disable_msix(pdev);
free_entries:
        kfree(xhci->msix_entries);
        xhci->msix_entries = NULL;
        return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

        if (xhci->quirks & XHCI_PLAT)
                return;

        xhci_free_irq(xhci);

        if (xhci->msix_entries) {
                pci_disable_msix(pdev);
                kfree(xhci->msix_entries);
                xhci->msix_entries = NULL;
        } else {
                pci_disable_msi(pdev);
        }

        hcd->msix_enabled = 0;
        return;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
        int i;

        if (xhci->msix_entries) {
                for (i = 0; i < xhci->msix_count; i++)
                        synchronize_irq(xhci->msix_entries[i].vector);
        }
}

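/*
 * Interrupt setup policy: try MSI-X first, fall back to plain MSI, and
 * finally fall back to the legacy shared PCI IRQ.  Fresco Logic hosts that
 * advertise MSI but never signal it (XHCI_BROKEN_MSI) skip straight to the
 * legacy path.
 */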
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct pci_dev *pdev;
        int ret;

        /* The xhci platform device has set up IRQs through usb_add_hcd. */
        if (xhci->quirks & XHCI_PLAT)
                return 0;

        pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
        /*
         * Some Fresco Logic host controllers advertise MSI, but fail to
         * generate interrupts.  Don't even try to enable MSI.
         */
        if (xhci->quirks & XHCI_BROKEN_MSI)
                goto legacy_irq;

        /* unregister the legacy interrupt */
        if (hcd->irq)
                free_irq(hcd->irq, hcd);
        hcd->irq = 0;

        ret = xhci_setup_msix(xhci);
        if (ret)
                /* fall back to MSI */
                ret = xhci_setup_msi(xhci);

        if (!ret)
                /* hcd->irq is 0, we have MSI */
                return 0;

        if (!pdev->irq) {
                xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
                return -EINVAL;
        }

legacy_irq:
        if (!strlen(hcd->irq_descr))
                snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
                                hcd->driver->description, hcd->self.busnum);

        /* fall back to legacy interrupt */
        ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
                        hcd->irq_descr, hcd);
        if (ret) {
                xhci_err(xhci, "request interrupt %d failed\n",
                                pdev->irq);
                return ret;
        }
        hcd->irq = pdev->irq;
        return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
        return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(unsigned long arg)
{
        struct xhci_hcd *xhci;
        struct usb_hcd *hcd;
        u32 temp;
        int i;

        xhci = (struct xhci_hcd *)arg;

        for (i = 0; i < xhci->num_usb3_ports; i++) {
                temp = readl(xhci->usb3_ports[i]);
                if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
                        /*
                         * Compliance Mode Detected. Letting USB Core
                         * handle the Warm Reset
                         */
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                        "Compliance mode detected->port %d",
                                        i + 1);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                        "Attempting compliance mode recovery");
                        hcd = xhci->shared_hcd;

                        if (hcd->state == HC_STATE_SUSPENDED)
                                usb_hcd_resume_root_hub(hcd);

                        usb_hcd_poll_rh_status(hcd);
                }
        }

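        /*
         * Re-arm the poll timer until every USB3 port has been observed in
         * U0; xhci_all_ports_seen_u0() below tests the same all-bits-set
         * condition.
         */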
        if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports) - 1))
                mod_timer(&xhci->comp_mode_recovery_timer,
                        jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
 * which sometimes makes ports behind that hardware enter compliance mode.
 * The quirk creates a timer that polls the link state of each host
 * controller port every 2 seconds and recovers a port by issuing a warm
 * reset if compliance mode is detected; otherwise the port becomes "dead"
 * (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * the xHCI spec), this quirk is needed on systems that have the failing
 * hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
        xhci->port_status_u0 = 0;
        setup_timer(&xhci->comp_mode_recovery_timer,
                        compliance_mode_recovery, (unsigned long)xhci);
        xhci->comp_mode_recovery_timer.expires = jiffies +
                        msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

        set_timer_slack(&xhci->comp_mode_recovery_timer,
                        msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
        add_timer(&xhci->comp_mode_recovery_timer);
        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                        "Compliance mode recovery timer initialized");
}
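/*
 * The generous timer slack above tells the timer core that this poll may
 * fire up to a full recovery interval late, letting it be coalesced with
 * other timers; compliance-mode polling has no precision requirement.
 */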

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that therefore need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and
 * Z1 Workstation
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
        const char *dmi_product_name, *dmi_sys_vendor;

        dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
        dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
        if (!dmi_product_name || !dmi_sys_vendor)
                return false;

        if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
                return false;

        if (strstr(dmi_product_name, "Z420") ||
                        strstr(dmi_product_name, "Z620") ||
                        strstr(dmi_product_name, "Z820") ||
                        strstr(dmi_product_name, "Z1 Workstation"))
                return true;

        return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
        return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports) - 1));
}
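/*
 * port_status_u0 keeps one bit per USB3 port; a bit is set once that port
 * has been seen in U0, so the helper above returns true (and the recovery
 * timer stops re-arming) only after every port has trained to U0.
 */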


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        int retval = 0;

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
        spin_lock_init(&xhci->lock);
        if (xhci->hci_version == 0x95 && link_quirk) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "QUIRK: Not clearing Link TRB chain bits.");
                xhci->quirks |= XHCI_LINK_TRB_QUIRK;
        } else {
                xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                                "xHCI doesn't need link TRB QUIRK");
        }
        retval = xhci_mem_init(xhci, GFP_KERNEL);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

        /* Initializing Compliance Mode Recovery Data If Needed */
        if (xhci_compliance_mode_recovery_timer_quirk_check()) {
                xhci->quirks |= XHCI_COMP_MODE_QUIRK;
                compliance_mode_recovery_timer_init(xhci);
        }

        return retval;
}

/*-------------------------------------------------------------------------*/


static int xhci_run_finished(struct xhci_hcd *xhci)
{
        if (xhci_start(xhci)) {
                xhci_halt(xhci);
                return -ENODEV;
        }
        xhci->shared_hcd->state = HC_STATE_RUNNING;
        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

        if (xhci->quirks & XHCI_NEC_HOST)
                xhci_ring_cmd_db(xhci);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Finished xhci_run for USB3 roothub");
        return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
        u32 temp;
        u64 temp_64;
        int ret;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        /* Start the xHCI host controller running only after the USB 2.0
         * roothub has been set up.
         */

        hcd->uses_new_polling = 1;
        if (!usb_hcd_is_primary_hcd(hcd))
                return xhci_run_finished(xhci);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

        ret = xhci_try_enable_msi(hcd);
        if (ret)
                return ret;

        xhci_dbg(xhci, "Command ring memory map follows:\n");
        xhci_debug_ring(xhci, xhci->cmd_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
        xhci_dbg_cmd_ptrs(xhci);

        xhci_dbg(xhci, "ERST memory map follows:\n");
        xhci_dbg_erst(xhci, &xhci->erst);
        xhci_dbg(xhci, "Event ring:\n");
        xhci_debug_ring(xhci, xhci->event_ring);
        xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        temp_64 &= ~ERST_PTR_MASK;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "ERST deq = 64'h%0lx", (long unsigned int) temp_64);

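        /*
         * The interrupt moderation interval field is in 250 ns units per the
         * xHCI spec, so the value 160 below yields ~40 us between interrupts.
         * MTK controllers count in units 8 times larger (2 us), so 20 gives
         * the same ~40 us.
         */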
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Set the interrupt modulation register");
        temp = readl(&xhci->ir_set->irq_control);
        temp &= ~ER_IRQ_INTERVAL_MASK;
        /*
         * On MTK controllers the increment interval is 8 times the one
         * defined in the xHCI spec.
         */
        temp |= (u32) ((xhci->quirks & XHCI_MTK_HOST) ? 20 : 160);
        writel(temp, &xhci->ir_set->irq_control);

        /* Set the HCD state before we enable the irqs */
        temp = readl(&xhci->op_regs->command);
        temp |= (CMD_EIE);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Enable interrupts, cmd = 0x%x.", temp);
        writel(temp, &xhci->op_regs->command);

        temp = readl(&xhci->ir_set->irq_pending);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
                        xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
        writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, 0);

        if (xhci->quirks & XHCI_NEC_HOST) {
                struct xhci_command *command;

                command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
                if (!command)
                        return -ENOMEM;
                xhci_queue_vendor_command(xhci, command, 0, 0, 0,
                                TRB_TYPE(TRB_NEC_GET_FW));
        }
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Finished xhci_run for USB2 roothub");
        return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (xhci->xhc_state & XHCI_STATE_HALTED)
                return;

        mutex_lock(&xhci->mutex);
        spin_lock_irq(&xhci->lock);
        xhci->xhc_state |= XHCI_STATE_HALTED;
        xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

        /* Make sure the xHC is halted for a USB3 roothub
         * (xhci_stop() could be called as part of failed init).
         */
        xhci_halt(xhci);
        xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

        /* Deleting Compliance Mode Recovery Timer */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                        (!(xhci_all_ports_seen_u0(xhci)))) {
                del_timer_sync(&xhci->comp_mode_recovery_timer);
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "%s: compliance mode recovery timer deleted",
                                __func__);
        }

        if (xhci->quirks & XHCI_AMD_PLL_FIX)
                usb_amd_dev_put();

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Disabling event ring interrupts");
        temp = readl(&xhci->op_regs->status);
        writel(temp & ~STS_EINT, &xhci->op_regs->status);
        temp = readl(&xhci->ir_set->irq_pending);
        writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
        xhci_print_ir_set(xhci, 0);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
        xhci_mem_cleanup(xhci);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "xhci_stop completed - status = %x",
                        readl(&xhci->op_regs->status));
        mutex_unlock(&xhci->mutex);
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that
 * the machine will be powered off, and the HC's internal state will be
 * reset.  Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
                usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
        /* Workaround for spurious wakeups at shutdown with HSW */
        if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
                xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);

        xhci_cleanup_msix(xhci);

        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "xhci_shutdown completed - status = %x",
                        readl(&xhci->op_regs->status));

        /* Yet another workaround for spurious wakeups at shutdown with HSW */
        if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
                pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
        xhci->s3.command = readl(&xhci->op_regs->command);
        xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
        xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
        xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
        xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
        xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
        xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
        writel(xhci->s3.command, &xhci->op_regs->command);
        writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
        xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
        writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
        writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
        xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
        xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
        writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
        writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
        u64 val_64;

        /* step 2: initialize command ring buffer */
        val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
                (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                                xhci->cmd_ring->dequeue) &
                 (u64) ~CMD_RING_RSVD_BITS) |
                xhci->cmd_ring->cycle_state;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "// Setting command ring address to 0x%llx",
                        (long unsigned long) val_64);
        xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be
 * 64-byte aligned, because of the reserved bits in the command ring dequeue
 * pointer register.  Therefore, we can't just set the dequeue pointer back
 * in the middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
        struct xhci_ring *ring;
        struct xhci_segment *seg;

        ring = xhci->cmd_ring;
        seg = ring->deq_seg;
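        /*
         * Zero every TRB in each segment except the trailing link TRB, whose
         * link pointer must survive; only the link TRB's cycle bit is
         * cleared.
         */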
        do {
                memset(seg->trbs, 0,
                        sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
                seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
                        cpu_to_le32(~TRB_CYCLE);
                seg = seg->next;
        } while (seg != ring->deq_seg);

        /* Reset the software enqueue and dequeue pointers */
        ring->deq_seg = ring->first_seg;
        ring->dequeue = ring->first_seg->trbs;
        ring->enq_seg = ring->deq_seg;
        ring->enqueue = ring->dequeue;

        ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
        /*
         * Ring is now zeroed, so the HW should look for change of ownership
         * when the cycle bit is set to 1.
         */
        ring->cycle_state = 1;

        /*
         * Reset the hardware dequeue pointer.
         * Yes, this will need to be re-written after resume, but we're paranoid
         * and want to make sure the hardware doesn't access bogus memory
         * because, say, the BIOS or an SMI started the host without changing
         * the command ring pointers.
         */
        xhci_set_cmd_ring_deq(xhci);
}

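/*
 * Clear PORT_WKOC_E, PORT_WKDISC_E and PORT_WKCONN_E on every root port so
 * the ports cannot generate wake events while the host is suspended.  Called
 * from xhci_suspend() when the wakeup policy forbids remote wakeup.
 */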
static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
        int port_index;
        __le32 __iomem **port_array;
        unsigned long flags;
        u32 t1, t2;

        spin_lock_irqsave(&xhci->lock, flags);

        /* disable wake bits on the USB3 ports */
        port_index = xhci->num_usb3_ports;
        port_array = xhci->usb3_ports;
        while (port_index--) {
                t1 = readl(port_array[port_index]);
                t1 = xhci_port_state_to_neutral(t1);
                t2 = t1 & ~PORT_WAKE_BITS;
                if (t1 != t2)
                        writel(t2, port_array[port_index]);
        }

        /* disable wake bits on the USB2 ports */
        port_index = xhci->num_usb2_ports;
        port_array = xhci->usb2_ports;
        while (port_index--) {
                t1 = readl(port_array[port_index]);
                t1 = xhci_port_state_to_neutral(t1);
                t2 = t1 & ~PORT_WAKE_BITS;
                if (t1 != t2)
                        writel(t2, port_array[port_index]);
        }

        spin_unlock_irqrestore(&xhci->lock, flags);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
        int rc = 0;
        unsigned int delay = XHCI_MAX_HALT_USEC;
        struct usb_hcd *hcd = xhci_to_hcd(xhci);
        u32 command;

        if (!hcd->state)
                return 0;

        if (hcd->state != HC_STATE_SUSPENDED ||
                        xhci->shared_hcd->state != HC_STATE_SUSPENDED)
                return -EINVAL;

        /* Clear root port wake on bits if wakeup not allowed. */
        if (!do_wakeup)
                xhci_disable_port_wake_on_bits(xhci);

        /* Don't poll the roothubs on bus suspend. */
        xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        del_timer_sync(&hcd->rh_timer);
        clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
        del_timer_sync(&xhci->shared_hcd->rh_timer);

        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
        /* step 1: stop endpoints */
        /* skipped - port suspend should already have done this */

        /* step 2: clear Run/Stop bit */
        command = readl(&xhci->op_regs->command);
        command &= ~CMD_RUN;
        writel(command, &xhci->op_regs->command);

        /* Some chips from Fresco Logic need an extraordinary delay */
        delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

        if (xhci_handshake(&xhci->op_regs->status,
                        STS_HALT, STS_HALT, delay)) {
                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        xhci_clear_command_ring(xhci);

        /* step 3: save registers */
        xhci_save_registers(xhci);

        /* step 4: set CSS flag */
        command = readl(&xhci->op_regs->command);
        command |= CMD_CSS;
        writel(command, &xhci->op_regs->command);
        if (xhci_handshake(&xhci->op_regs->status,
                        STS_SAVE, 0, 10 * 1000)) {
                xhci_warn(xhci, "WARN: xHC save state timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
        }
        spin_unlock_irq(&xhci->lock);

        /*
         * Deleting Compliance Mode Recovery Timer because the xHCI Host
         * is about to be suspended.
         */
        if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
                        (!(xhci_all_ports_seen_u0(xhci)))) {
                del_timer_sync(&xhci->comp_mode_recovery_timer);
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "%s: compliance mode recovery timer deleted",
                                __func__);
        }

        /* step 5: remove core well power */
        /* synchronize irq when using MSI-X */
        xhci_msix_sync_irqs(xhci);

        return rc;
}
Andrew Bresticker | 436e8c7 | 2014-10-03 11:35:28 +0300 | [diff] [blame] | 977 | EXPORT_SYMBOL_GPL(xhci_suspend); |
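
/*
 * Illustrative sketch (not part of this file; the function name is
 * hypothetical): platform glue such as xhci-plat typically calls the
 * exported xhci_suspend() from a dev_pm_ops callback, with
 * device_may_wakeup() deciding whether the root-port wake bits stay armed.
 */
static int example_xhci_glue_suspend(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	return xhci_suspend(xhci, device_may_wakeup(dev));
}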
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 978 | |
| 979 | /* |
| 980 | * start xHC (not bus-specific) |
| 981 | * |
| 982 | * This is called when the machine transitions out of S3/S4 mode. |
| 983 | * |
| 984 | */ |
| 985 | int xhci_resume(struct xhci_hcd *xhci, bool hibernated) |
| 986 | { |
Wang, Yu | d6236f6 | 2014-06-24 17:14:44 +0300 | [diff] [blame] | 987 | u32 command, temp = 0, status; |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 988 | struct usb_hcd *hcd = xhci_to_hcd(xhci); |
Sarah Sharp | 65b22f9 | 2010-12-17 12:35:05 -0800 | [diff] [blame] | 989 | struct usb_hcd *secondary_hcd; |
Alan Stern | f69e3120 | 2011-11-03 11:37:10 -0400 | [diff] [blame] | 990 | int retval = 0; |
Tony Camuso | 77df9e0 | 2013-02-21 16:11:27 -0500 | [diff] [blame] | 991 | bool comp_timer_running = false; |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 992 | |
Roger Quadros | 9fa733f | 2015-05-29 17:01:50 +0300 | [diff] [blame] | 993 | if (!hcd->state) |
| 994 | return 0; |
| 995 | |
Sarah Sharp | f6ff0ac | 2010-12-16 11:21:10 -0800 | [diff] [blame] | 996 | /* Wait a bit if either of the roothubs needs to settle from the |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 997 | * transition into bus suspend. |
Sarah Sharp | 20b67cf | 2010-12-15 12:47:14 -0800 | [diff] [blame] | 998 | */ |
Sarah Sharp | f6ff0ac | 2010-12-16 11:21:10 -0800 | [diff] [blame] | 999 | if (time_before(jiffies, xhci->bus_state[0].next_statechange) || |
| 1000 | time_before(jiffies, |
| 1001 | xhci->bus_state[1].next_statechange)) |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1002 | msleep(100); |
| 1003 | |
Alan Stern | f69e3120 | 2011-11-03 11:37:10 -0400 | [diff] [blame] | 1004 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); |
| 1005 | set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); |
| 1006 | |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1007 | spin_lock_irq(&xhci->lock); |
Maarten Lankhorst | c877b3b | 2011-06-15 23:47:21 +0200 | [diff] [blame] | 1008 | if (xhci->quirks & XHCI_RESET_ON_RESUME) |
| 1009 | hibernated = true; |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1010 | |
| 1011 | if (!hibernated) { |
| 1012 | /* step 1: restore register */ |
| 1013 | xhci_restore_registers(xhci); |
| 1014 | /* step 2: initialize command ring buffer */ |
Sarah Sharp | 8982132 | 2010-11-12 11:59:31 -0800 | [diff] [blame] | 1015 | xhci_set_cmd_ring_deq(xhci); |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1016 | /* step 3: restore state and start state */ |
| 1017 | /* set the CRS flag to kick off the hardware restore */ |
Xenia Ragiadakou | b0ba972 | 2013-11-15 05:34:06 +0200 | [diff] [blame] | 1018 | command = readl(&xhci->op_regs->command); |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1019 | command |= CMD_CRS; |
Xenia Ragiadakou | 204b779 | 2013-11-15 05:34:07 +0200 | [diff] [blame] | 1020 | writel(command, &xhci->op_regs->command); |
Lin Wang | dc0b177 | 2015-01-09 16:06:28 +0200 | [diff] [blame] | 1021 | if (xhci_handshake(&xhci->op_regs->status, |
Andiry Xu | 622eb78 | 2012-06-13 10:51:57 +0800 | [diff] [blame] | 1022 | STS_RESTORE, 0, 10 * 1000)) { |
| 1023 | xhci_warn(xhci, "WARN: xHC restore state timeout\n"); |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1024 | spin_unlock_irq(&xhci->lock); |
| 1025 | return -ETIMEDOUT; |
| 1026 | } |
Xenia Ragiadakou | b0ba972 | 2013-11-15 05:34:06 +0200 | [diff] [blame] | 1027 | temp = readl(&xhci->op_regs->status); |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1028 | } |
| 1029 | |
| 1030 | /* If restore operation fails, re-initialize the HC during resume */ |
| 1031 | if ((temp & STS_SRE) || hibernated) { |
Tony Camuso | 77df9e0 | 2013-02-21 16:11:27 -0500 | [diff] [blame] | 1032 | |
| 1033 | if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && |
| 1034 | !(xhci_all_ports_seen_u0(xhci))) { |
| 1035 | del_timer_sync(&xhci->comp_mode_recovery_timer); |
Xenia Ragiadakou | 4bdfe4c | 2013-08-06 07:52:45 +0300 | [diff] [blame] | 1036 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
| 1037 | "Compliance Mode Recovery Timer deleted!"); |
Tony Camuso | 77df9e0 | 2013-02-21 16:11:27 -0500 | [diff] [blame] | 1038 | } |
| 1039 | |
Sarah Sharp | fedd383 | 2011-04-12 17:43:19 -0700 | [diff] [blame] | 1040 | /* Let the USB core know _both_ roothubs lost power. */ |
| 1041 | usb_root_hub_lost_power(xhci->main_hcd->self.root_hub); |
| 1042 | usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub); |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1043 | |
| 1044 | xhci_dbg(xhci, "Stop HCD\n"); |
| 1045 | xhci_halt(xhci); |
| 1046 | xhci_reset(xhci); |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1047 | spin_unlock_irq(&xhci->lock); |
Andiry Xu | 0029227 | 2010-12-27 17:39:02 +0800 | [diff] [blame] | 1048 | xhci_cleanup_msix(xhci); |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1049 | |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1050 | xhci_dbg(xhci, "// Disabling event ring interrupts\n"); |
Xenia Ragiadakou | b0ba972 | 2013-11-15 05:34:06 +0200 | [diff] [blame] | 1051 | temp = readl(&xhci->op_regs->status); |
Xenia Ragiadakou | 204b779 | 2013-11-15 05:34:07 +0200 | [diff] [blame] | 1052 | writel(temp & ~STS_EINT, &xhci->op_regs->status); |
Xenia Ragiadakou | b0ba972 | 2013-11-15 05:34:06 +0200 | [diff] [blame] | 1053 | temp = readl(&xhci->ir_set->irq_pending); |
Xenia Ragiadakou | 204b779 | 2013-11-15 05:34:07 +0200 | [diff] [blame] | 1054 | writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); |
Dmitry Torokhov | 09ece30 | 2011-02-08 16:29:33 -0800 | [diff] [blame] | 1055 | xhci_print_ir_set(xhci, 0); |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1056 | |
| 1057 | xhci_dbg(xhci, "cleaning up memory\n"); |
| 1058 | xhci_mem_cleanup(xhci); |
| 1059 | xhci_dbg(xhci, "xhci_stop completed - status = %x\n", |
Xenia Ragiadakou | b0ba972 | 2013-11-15 05:34:06 +0200 | [diff] [blame] | 1060 | readl(&xhci->op_regs->status)); |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1061 | |
Sarah Sharp | 65b22f9 | 2010-12-17 12:35:05 -0800 | [diff] [blame] | 1062 | /* USB core calls the PCI reinit and start functions twice: |
| 1063 | * first with the primary HCD, and then with the secondary HCD. |
| 1064 | * If we don't do the same, the host will never be started. |
| 1065 | */ |
| 1066 | if (!usb_hcd_is_primary_hcd(hcd)) |
| 1067 | secondary_hcd = hcd; |
| 1068 | else |
| 1069 | secondary_hcd = xhci->shared_hcd; |
| 1070 | |
| 1071 | xhci_dbg(xhci, "Initialize the xhci_hcd\n"); |
| 1072 | retval = xhci_init(hcd->primary_hcd); |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1073 | if (retval) |
| 1074 | return retval; |
Tony Camuso | 77df9e0 | 2013-02-21 16:11:27 -0500 | [diff] [blame] | 1075 | comp_timer_running = true; |
| 1076 | |
Sarah Sharp | 65b22f9 | 2010-12-17 12:35:05 -0800 | [diff] [blame] | 1077 | xhci_dbg(xhci, "Start the primary HCD\n"); |
| 1078 | retval = xhci_run(hcd->primary_hcd); |
Sarah Sharp | b320937 | 2011-03-07 11:24:07 -0800 | [diff] [blame] | 1079 | if (!retval) { |
Alan Stern | f69e3120 | 2011-11-03 11:37:10 -0400 | [diff] [blame] | 1080 | xhci_dbg(xhci, "Start the secondary HCD\n"); |
| 1081 | retval = xhci_run(secondary_hcd); |
Sarah Sharp | b320937 | 2011-03-07 11:24:07 -0800 | [diff] [blame] | 1082 | } |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1083 | hcd->state = HC_STATE_SUSPENDED; |
Sarah Sharp | b320937 | 2011-03-07 11:24:07 -0800 | [diff] [blame] | 1084 | xhci->shared_hcd->state = HC_STATE_SUSPENDED; |
Alan Stern | f69e3120 | 2011-11-03 11:37:10 -0400 | [diff] [blame] | 1085 | goto done; |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1086 | } |
| 1087 | |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1088 | /* step 4: set Run/Stop bit */ |
Xenia Ragiadakou | b0ba972 | 2013-11-15 05:34:06 +0200 | [diff] [blame] | 1089 | command = readl(&xhci->op_regs->command); |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1090 | command |= CMD_RUN; |
Xenia Ragiadakou | 204b779 | 2013-11-15 05:34:07 +0200 | [diff] [blame] | 1091 | writel(command, &xhci->op_regs->command); |
Lin Wang | dc0b177 | 2015-01-09 16:06:28 +0200 | [diff] [blame] | 1092 | xhci_handshake(&xhci->op_regs->status, STS_HALT, |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1093 | 0, 250 * 1000); |
| 1094 | |
| 1095 | /* step 5: walk topology and initialize portsc, |
| 1096 | * portpmsc and portli |
| 1097 | */ |
| 1098 | /* this is done in bus_resume */ |
| 1099 | |
| 1100 | /* step 6: restart each of the previously |
| 1101 | * running endpoints by ringing their doorbells |
| 1102 | */ |
| 1103 | |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1104 | spin_unlock_irq(&xhci->lock); |
Alan Stern | f69e3120 | 2011-11-03 11:37:10 -0400 | [diff] [blame] | 1105 | |
| 1106 | done: |
| 1107 | if (retval == 0) { |
Wang, Yu | d6236f6 | 2014-06-24 17:14:44 +0300 | [diff] [blame] | 1108 | /* Resume root hubs only when there are pending events. */ |
| 1109 | status = readl(&xhci->op_regs->status); |
| 1110 | if (status & STS_EINT) { |
| 1111 | usb_hcd_resume_root_hub(hcd); |
| 1112 | usb_hcd_resume_root_hub(xhci->shared_hcd); |
| 1113 | } |
Alan Stern | f69e3120 | 2011-11-03 11:37:10 -0400 | [diff] [blame] | 1114 | } |
Alexis R. Cortes | 71c731a | 2012-08-03 14:00:27 -0500 | [diff] [blame] | 1115 | |
| 1116 | /* |
| 1117 | * If the system is affected by the quirk, the compliance mode recovery |
| 1118 | * timer must always be re-initialized after a system resume, because |
| 1119 | * the ports may hit the compliance mode issue again regardless of |
| 1120 | * whether they reached U0 before the system was suspended. |
| 1121 | */ |
Tony Camuso | 77df9e0 | 2013-02-21 16:11:27 -0500 | [diff] [blame] | 1122 | if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) |
Alexis R. Cortes | 71c731a | 2012-08-03 14:00:27 -0500 | [diff] [blame] | 1123 | compliance_mode_recovery_timer_init(xhci); |
| 1124 | |
Sarah Sharp | c52804a | 2012-11-27 12:30:23 -0800 | [diff] [blame] | 1125 | /* Re-enable port polling. */ |
| 1126 | xhci_dbg(xhci, "%s: starting port polling.\n", __func__); |
| 1127 | set_bit(HCD_FLAG_POLL_RH, &hcd->flags); |
| 1128 | usb_hcd_poll_rh_status(hcd); |
Al Cooper | 14e61a1 | 2014-08-20 16:41:57 +0300 | [diff] [blame] | 1129 | set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); |
| 1130 | usb_hcd_poll_rh_status(xhci->shared_hcd); |
Sarah Sharp | c52804a | 2012-11-27 12:30:23 -0800 | [diff] [blame] | 1131 | |
Alan Stern | f69e3120 | 2011-11-03 11:37:10 -0400 | [diff] [blame] | 1132 | return retval; |
Andiry Xu | 5535b1d | 2010-10-14 07:23:06 -0700 | [diff] [blame] | 1133 | } |
Andrew Bresticker | 436e8c7 | 2014-10-03 11:35:28 +0300 | [diff] [blame] | 1134 | EXPORT_SYMBOL_GPL(xhci_resume); |
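
/*
 * Matching resume sketch (hypothetical name): glue code passes hibernated =
 * true when returning from S4 so the re-init path in xhci_resume() is taken;
 * after an S3 resume it passes false and lets the STS_SRE check (and the
 * XHCI_RESET_ON_RESUME quirk) decide.
 */
static int example_xhci_glue_resume(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	return xhci_resume(xhci, false);
}
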
Sarah Sharp | b5b5c3a | 2010-10-15 11:24:14 -0700 | [diff] [blame] | 1135 | #endif /* CONFIG_PM */ |
| 1136 | |
Sarah Sharp | 7f84eef | 2009-04-27 19:53:56 -0700 | [diff] [blame] | 1137 | /*-------------------------------------------------------------------------*/ |
| 1138 | |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1139 | /** |
| 1140 | * xhci_get_endpoint_index - find the xHCI endpoint index for an endpoint |
| 1141 | * descriptor. Used for passing endpoint bitmasks between the core and HCDs. |
| 1142 | * Shift 1 left by (the return value + 1) to form the bitmask (bit 0 is the slot context). |
| 1143 | * |
| 1144 | * Index = (epnum * 2) + direction - 1, |
| 1145 | * where direction = 0 for OUT, 1 for IN. |
| 1146 | * For control endpoints, the IN index is used (OUT index is unused), so |
| 1147 | * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2) |
| 1148 | */ |
| 1149 | unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc) |
| 1150 | { |
| 1151 | unsigned int index; |
| 1152 | if (usb_endpoint_xfer_control(desc)) |
| 1153 | index = (unsigned int) (usb_endpoint_num(desc)*2); |
| 1154 | else |
| 1155 | index = (unsigned int) (usb_endpoint_num(desc)*2) + |
| 1156 | (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; |
| 1157 | return index; |
| 1158 | } |
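
/*
 * Worked examples of the formula above (values only, not driver code):
 *   ep0 (control):   index = 0 * 2           = 0
 *   ep 1 OUT (0x01): index = (1 * 2) + 0 - 1 = 1
 *   ep 1 IN  (0x81): index = (1 * 2) + 1 - 1 = 2
 *   ep 2 OUT (0x02): index = (2 * 2) + 0 - 1 = 3
 * i.e. the index is the xHCI Device Context Index (DCI) minus one.
 */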
| 1159 | |
Julius Werner | 01c5f44 | 2013-04-15 15:55:04 -0700 | [diff] [blame] | 1160 | /* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint |
| 1161 | * address from the XHCI endpoint index. |
| 1162 | */ |
| 1163 | unsigned int xhci_get_endpoint_address(unsigned int ep_index) |
| 1164 | { |
| 1165 | unsigned int number = DIV_ROUND_UP(ep_index, 2); |
| 1166 | unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN; |
| 1167 | return direction | number; |
| 1168 | } |
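
/*
 * Worked examples (values only): index 1 -> 0x01 (ep 1 OUT), index 2 ->
 * USB_DIR_IN | 1 == 0x81 (ep 1 IN). Note that index 0 always maps back to
 * 0x80, since control endpoints use the IN index in xhci_get_endpoint_index().
 */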
| 1169 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1170 | /* Find the flag for this endpoint (for use in the control context). Use the |
| 1171 | * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is |
| 1172 | * bit 1, etc. |
| 1173 | */ |
| 1174 | unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc) |
| 1175 | { |
| 1176 | return 1 << (xhci_get_endpoint_index(desc) + 1); |
| 1177 | } |
| 1178 | |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 1179 | /* Find the flag for this endpoint (for use in the control context). Use the |
| 1180 | * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is |
| 1181 | * bit 1, etc. |
| 1182 | */ |
| 1183 | unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index) |
| 1184 | { |
| 1185 | return 1 << (ep_index + 1); |
| 1186 | } |
| 1187 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1188 | /* Compute the last valid endpoint context index. Basically, this is the |
| 1189 | * endpoint index plus one. For slot contexts with more than one valid endpoint, |
| 1190 | * we find the most significant bit set in the added contexts flags. |
| 1191 | * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000 |
| 1192 | * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one. |
| 1193 | */ |
Sarah Sharp | ac9d8fe | 2009-08-07 14:04:55 -0700 | [diff] [blame] | 1194 | unsigned int xhci_last_valid_endpoint(u32 added_ctxs) |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1195 | { |
| 1196 | return fls(added_ctxs) - 1; |
| 1197 | } |
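
/*
 * Worked example (values only): adding ep 1 IN (index 2, flag bit 3) and
 * ep 2 OUT (index 3, flag bit 4) gives added_ctxs = 0b11000, and
 * fls(0b11000) - 1 = 4 -- the endpoint index of ep 2 OUT plus one, i.e. its
 * Device Context Index.
 */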
| 1198 | |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1199 | /* Returns 1 if the arguments are OK; |
| 1200 | * returns 0 if this is a root hub; returns -EINVAL for NULL pointers. |
| 1201 | */ |
Dmitry Torokhov | 8212a49 | 2011-02-08 13:55:59 -0800 | [diff] [blame] | 1202 | static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1203 | struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, |
| 1204 | const char *func) { |
| 1205 | struct xhci_hcd *xhci; |
| 1206 | struct xhci_virt_device *virt_dev; |
| 1207 | |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1208 | if (!hcd || (check_ep && !ep) || !udev) { |
Xenia Ragiadakou | 5c1127d | 2013-07-02 17:49:26 +0300 | [diff] [blame] | 1209 | pr_debug("xHCI %s called with invalid args\n", func); |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1210 | return -EINVAL; |
| 1211 | } |
| 1212 | if (!udev->parent) { |
Xenia Ragiadakou | 5c1127d | 2013-07-02 17:49:26 +0300 | [diff] [blame] | 1213 | pr_debug("xHCI %s called for root hub\n", func); |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1214 | return 0; |
| 1215 | } |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1216 | |
Sarah Sharp | 7bd89b4 | 2011-07-01 13:35:40 -0700 | [diff] [blame] | 1217 | xhci = hcd_to_xhci(hcd); |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1218 | if (check_virt_dev) { |
sifram.rajas@gmail.com | 73ddc24 | 2011-09-02 11:06:00 -0700 | [diff] [blame] | 1219 | if (!udev->slot_id || !xhci->devs[udev->slot_id]) { |
Xenia Ragiadakou | 5c1127d | 2013-07-02 17:49:26 +0300 | [diff] [blame] | 1220 | xhci_dbg(xhci, "xHCI %s called with unaddressed device\n", |
| 1221 | func); |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1222 | return -EINVAL; |
| 1223 | } |
| 1224 | |
| 1225 | virt_dev = xhci->devs[udev->slot_id]; |
| 1226 | if (virt_dev->udev != udev) { |
Xenia Ragiadakou | 5c1127d | 2013-07-02 17:49:26 +0300 | [diff] [blame] | 1227 | xhci_dbg(xhci, "xHCI %s called with udev that " |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1228 | "does not match virt_dev\n", func); |
| 1229 | return -EINVAL; |
| 1230 | } |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1231 | } |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1232 | |
Sarah Sharp | 203a866 | 2013-07-24 10:27:13 -0700 | [diff] [blame] | 1233 | if (xhci->xhc_state & XHCI_STATE_HALTED) |
| 1234 | return -ENODEV; |
| 1235 | |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1236 | return 1; |
| 1237 | } |
| 1238 | |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1239 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1240 | struct usb_device *udev, struct xhci_command *command, |
| 1241 | bool ctx_change, bool must_succeed); |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1242 | |
| 1243 | /* |
| 1244 | * Full speed devices may have a max packet size greater than 8 bytes, but the |
| 1245 | * USB core doesn't know that until it reads the first 8 bytes of the |
| 1246 | * descriptor. If the usb_device's max packet size changes after that point, |
| 1247 | * we need to issue an evaluate context command and wait on it. |
| 1248 | */ |
| 1249 | static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, |
| 1250 | unsigned int ep_index, struct urb *urb) |
| 1251 | { |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1252 | struct xhci_container_ctx *out_ctx; |
| 1253 | struct xhci_input_control_ctx *ctrl_ctx; |
| 1254 | struct xhci_ep_ctx *ep_ctx; |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 1255 | struct xhci_command *command; |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1256 | int max_packet_size; |
| 1257 | int hw_max_packet_size; |
| 1258 | int ret = 0; |
| 1259 | |
| 1260 | out_ctx = xhci->devs[slot_id]->out_ctx; |
| 1261 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1262 | hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); |
Kuninori Morimoto | 29cc889 | 2011-08-23 03:12:03 -0700 | [diff] [blame] | 1263 | max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc); |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1264 | if (hw_max_packet_size != max_packet_size) { |
Xenia Ragiadakou | 3a7fa5b | 2013-07-31 07:35:27 +0300 | [diff] [blame] | 1265 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
| 1266 | "Max Packet Size for ep 0 changed."); |
| 1267 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
| 1268 | "Max packet size in usb_device = %d", |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1269 | max_packet_size); |
Xenia Ragiadakou | 3a7fa5b | 2013-07-31 07:35:27 +0300 | [diff] [blame] | 1270 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
| 1271 | "Max packet size in xHCI HW = %d", |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1272 | hw_max_packet_size); |
Xenia Ragiadakou | 3a7fa5b | 2013-07-31 07:35:27 +0300 | [diff] [blame] | 1273 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
| 1274 | "Issuing evaluate context command."); |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1275 | |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1276 | /* Set up the input context flags for the command */ |
| 1277 | /* FIXME: This won't work if a non-default control endpoint |
| 1278 | * changes max packet sizes. |
| 1279 | */ |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 1280 | |
| 1281 | command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); |
| 1282 | if (!command) |
| 1283 | return -ENOMEM; |
| 1284 | |
| 1285 | command->in_ctx = xhci->devs[slot_id]->in_ctx; |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 1286 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 1287 | if (!ctrl_ctx) { |
| 1288 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
| 1289 | __func__); |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 1290 | ret = -ENOMEM; |
| 1291 | goto command_cleanup; |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 1292 | } |
| 1293 | /* Set up the modified control endpoint 0 */ |
| 1294 | xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, |
| 1295 | xhci->devs[slot_id]->out_ctx, ep_index); |
| 1296 | |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 1297 | ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 1298 | ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); |
| 1299 | ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); |
| 1300 | |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1301 | ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG); |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1302 | ctrl_ctx->drop_flags = 0; |
| 1303 | |
| 1304 | xhci_dbg(xhci, "Slot %d input context\n", slot_id); |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 1305 | xhci_dbg_ctx(xhci, command->in_ctx, ep_index); |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1306 | xhci_dbg(xhci, "Slot %d output context\n", slot_id); |
| 1307 | xhci_dbg_ctx(xhci, out_ctx, ep_index); |
| 1308 | |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 1309 | ret = xhci_configure_endpoint(xhci, urb->dev, command, |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1310 | true, false); |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1311 | |
| 1312 | /* Clean up the input context for later use by bandwidth |
| 1313 | * functions. |
| 1314 | */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1315 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG); |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 1316 | command_cleanup: |
| 1317 | kfree(command->completion); |
| 1318 | kfree(command); |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1319 | } |
| 1320 | return ret; |
| 1321 | } |
| 1322 | |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1323 | /* |
| 1324 | * non-error returns are a promise to giveback() the urb later |
| 1325 | * we drop ownership so next owner (or urb unlink) can get it |
| 1326 | */ |
| 1327 | int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) |
| 1328 | { |
| 1329 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
Andiry Xu | 2ffdea2 | 2011-09-02 11:05:57 -0700 | [diff] [blame] | 1330 | struct xhci_td *buffer; |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1331 | unsigned long flags; |
| 1332 | int ret = 0; |
| 1333 | unsigned int slot_id, ep_index; |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1334 | struct urb_priv *urb_priv; |
| 1335 | int size, i; |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1336 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1337 | if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, |
| 1338 | true, true, __func__) <= 0) |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1339 | return -EINVAL; |
| 1340 | |
| 1341 | slot_id = urb->dev->slot_id; |
| 1342 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1343 | |
Alan Stern | 541c7d4 | 2010-06-22 16:39:10 -0400 | [diff] [blame] | 1344 | if (!HCD_HW_ACCESSIBLE(hcd)) { |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1345 | if (!in_interrupt()) |
| 1346 | xhci_dbg(xhci, "urb submitted during PCI suspend\n"); |
| 1347 | ret = -ESHUTDOWN; |
| 1348 | goto exit; |
| 1349 | } |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1350 | |
| 1351 | if (usb_endpoint_xfer_isoc(&urb->ep->desc)) |
| 1352 | size = urb->number_of_packets; |
Reyad Attiyat | 4758dcd | 2015-08-06 19:23:58 +0300 | [diff] [blame] | 1353 | else if (usb_endpoint_is_bulk_out(&urb->ep->desc) && |
| 1354 | urb->transfer_buffer_length > 0 && |
| 1355 | urb->transfer_flags & URB_ZERO_PACKET && |
| 1356 | !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc))) |
| 1357 | size = 2; |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1358 | else |
| 1359 | size = 1; |
| 1360 | |
| 1361 | urb_priv = kzalloc(sizeof(struct urb_priv) + |
| 1362 | size * sizeof(struct xhci_td *), mem_flags); |
| 1363 | if (!urb_priv) |
| 1364 | return -ENOMEM; |
| 1365 | |
Andiry Xu | 2ffdea2 | 2011-09-02 11:05:57 -0700 | [diff] [blame] | 1366 | buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags); |
| 1367 | if (!buffer) { |
| 1368 | kfree(urb_priv); |
| 1369 | return -ENOMEM; |
| 1370 | } |
| 1371 | |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1372 | for (i = 0; i < size; i++) { |
Andiry Xu | 2ffdea2 | 2011-09-02 11:05:57 -0700 | [diff] [blame] | 1373 | urb_priv->td[i] = buffer; |
| 1374 | buffer++; |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1375 | } |
| 1376 | |
| 1377 | urb_priv->length = size; |
| 1378 | urb_priv->td_cnt = 0; |
| 1379 | urb->hcpriv = urb_priv; |
| 1380 | |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1381 | if (usb_endpoint_xfer_control(&urb->ep->desc)) { |
| 1382 | /* Check to see if the max packet size for the default control |
| 1383 | * endpoint changed during FS device enumeration |
| 1384 | */ |
| 1385 | if (urb->dev->speed == USB_SPEED_FULL) { |
| 1386 | ret = xhci_check_maxpacket(xhci, slot_id, |
| 1387 | ep_index, urb); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1388 | if (ret < 0) { |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 1389 | xhci_urb_free_priv(urb_priv); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1390 | urb->hcpriv = NULL; |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1391 | return ret; |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1392 | } |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1393 | } |
| 1394 | |
Sarah Sharp | b11069f | 2009-07-27 12:03:23 -0700 | [diff] [blame] | 1395 | /* We have a spinlock and interrupts disabled, so we must pass |
| 1396 | * atomic context to this function, which may allocate memory. |
| 1397 | */ |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1398 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1399 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1400 | goto dying; |
Sarah Sharp | b11069f | 2009-07-27 12:03:23 -0700 | [diff] [blame] | 1401 | ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 1402 | slot_id, ep_index); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1403 | if (ret) |
| 1404 | goto free_priv; |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1405 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 1406 | } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { |
| 1407 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1408 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1409 | goto dying; |
Sarah Sharp | 8df75f4 | 2010-04-02 15:34:16 -0700 | [diff] [blame] | 1410 | if (xhci->devs[slot_id]->eps[ep_index].ep_state & |
| 1411 | EP_GETTING_STREAMS) { |
| 1412 | xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " |
| 1413 | "is transitioning to using streams.\n"); |
| 1414 | ret = -EINVAL; |
| 1415 | } else if (xhci->devs[slot_id]->eps[ep_index].ep_state & |
| 1416 | EP_GETTING_NO_STREAMS) { |
| 1417 | xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep " |
| 1418 | "is transitioning to " |
| 1419 | "not having streams.\n"); |
| 1420 | ret = -EINVAL; |
| 1421 | } else { |
| 1422 | ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, |
| 1423 | slot_id, ep_index); |
| 1424 | } |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1425 | if (ret) |
| 1426 | goto free_priv; |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1427 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | 624defa | 2009-09-02 12:14:28 -0700 | [diff] [blame] | 1428 | } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { |
| 1429 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1430 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1431 | goto dying; |
Sarah Sharp | 624defa | 2009-09-02 12:14:28 -0700 | [diff] [blame] | 1432 | ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, |
| 1433 | slot_id, ep_index); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1434 | if (ret) |
| 1435 | goto free_priv; |
Sarah Sharp | 624defa | 2009-09-02 12:14:28 -0700 | [diff] [blame] | 1436 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1437 | } else { |
Andiry Xu | 787f4e5 | 2010-07-22 15:23:52 -0700 | [diff] [blame] | 1438 | spin_lock_irqsave(&xhci->lock, flags); |
| 1439 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1440 | goto dying; |
| 1441 | ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb, |
| 1442 | slot_id, ep_index); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1443 | if (ret) |
| 1444 | goto free_priv; |
Andiry Xu | 787f4e5 | 2010-07-22 15:23:52 -0700 | [diff] [blame] | 1445 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | 2d3f1fa | 2009-08-07 14:04:49 -0700 | [diff] [blame] | 1446 | } |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1447 | exit: |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1448 | return ret; |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1449 | dying: |
| 1450 | xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " |
| 1451 | "non-responsive xHCI host.\n", |
| 1452 | urb->ep->desc.bEndpointAddress, urb); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1453 | ret = -ESHUTDOWN; |
| 1454 | free_priv: |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 1455 | xhci_urb_free_priv(urb_priv); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1456 | urb->hcpriv = NULL; |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1457 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | d13565c | 2011-07-22 14:34:34 -0700 | [diff] [blame] | 1458 | return ret; |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1459 | } |
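
/*
 * TD accounting sketch (values only, under assumed endpoint parameters): a
 * 3-packet isochronous URB gets urb_priv->length == 3; a 1024-byte bulk OUT
 * URB with URB_ZERO_PACKET on a 512-byte-maxpacket endpoint gets 2 TDs (the
 * data plus the trailing zero-length packet); any other URB gets a single TD.
 */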
| 1460 | |
Sarah Sharp | 021bff9 | 2010-07-29 22:12:20 -0700 | [diff] [blame] | 1461 | /* Get the right ring for the given URB. |
| 1462 | * If the endpoint supports streams, boundary check the URB's stream ID. |
| 1463 | * If the endpoint doesn't support streams, return the singular endpoint ring. |
| 1464 | */ |
| 1465 | static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, |
| 1466 | struct urb *urb) |
| 1467 | { |
| 1468 | unsigned int slot_id; |
| 1469 | unsigned int ep_index; |
| 1470 | unsigned int stream_id; |
| 1471 | struct xhci_virt_ep *ep; |
| 1472 | |
| 1473 | slot_id = urb->dev->slot_id; |
| 1474 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
| 1475 | stream_id = urb->stream_id; |
| 1476 | ep = &xhci->devs[slot_id]->eps[ep_index]; |
| 1477 | /* Common case: no streams */ |
| 1478 | if (!(ep->ep_state & EP_HAS_STREAMS)) |
| 1479 | return ep->ring; |
| 1480 | |
| 1481 | if (stream_id == 0) { |
| 1482 | xhci_warn(xhci, |
| 1483 | "WARN: Slot ID %u, ep index %u has streams, " |
| 1484 | "but URB has no stream ID.\n", |
| 1485 | slot_id, ep_index); |
| 1486 | return NULL; |
| 1487 | } |
| 1488 | |
| 1489 | if (stream_id < ep->stream_info->num_streams) |
| 1490 | return ep->stream_info->stream_rings[stream_id]; |
| 1491 | |
| 1492 | xhci_warn(xhci, |
| 1493 | "WARN: Slot ID %u, ep index %u has " |
| 1494 | "stream IDs 1 to %u allocated, " |
| 1495 | "but stream ID %u is requested.\n", |
| 1496 | slot_id, ep_index, |
| 1497 | ep->stream_info->num_streams - 1, |
| 1498 | stream_id); |
| 1499 | return NULL; |
| 1500 | } |
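
/*
 * Example (values only, assumed stream setup): an endpoint with
 * ep->stream_info->num_streams == 4 has valid stream IDs 1 to 3; an URB
 * carrying stream_id 0, or 4 and above, makes this function warn and
 * return NULL.
 */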
| 1501 | |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1502 | /* |
| 1503 | * Remove the URB's TD from the endpoint ring. This may cause the HC to stop |
| 1504 | * USB transfers, potentially stopping in the middle of a TRB buffer. The HC |
| 1505 | * should pick up where it left off in the TD, unless a Set Transfer Ring |
| 1506 | * Dequeue Pointer is issued. |
| 1507 | * |
| 1508 | * The TRBs that make up the buffers for the canceled URB will be "removed" from |
| 1509 | * the ring. Since the ring is a contiguous structure, they can't be physically |
| 1510 | * removed. Instead, there are two options: |
| 1511 | * removed. Instead, there are two options (plus a third case to handle): |
| 1512 | * 1) If the HC is in the middle of processing the URB to be canceled, we |
| 1513 | * simply move the ring's dequeue pointer past those TRBs using the Set |
| 1514 | * Transfer Ring Dequeue Pointer command. This will be the common case, |
| 1515 | * when drivers timeout on the last submitted URB and attempt to cancel. |
| 1516 | * |
| 1517 | * 2) If the HC is in the middle of a different TD, we turn the TRBs into a |
| 1518 | * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The |
| 1519 | * HC will need to invalidate any TRBs it has cached after the stop |
| 1520 | * endpoint command, as noted in the xHCI 0.95 errata. |
| 1521 | * |
| 1522 | * 3) The TD may have completed by the time the Stop Endpoint Command |
| 1523 | * completes, so software needs to handle that case too. |
| 1524 | * |
| 1525 | * This function should protect against the TD enqueueing code ringing the |
| 1526 | * doorbell while this code is waiting for a Stop Endpoint command to complete. |
| 1527 | * It also needs to account for multiple cancellations happening at the same |
| 1528 | * time for the same endpoint. |
| 1529 | * |
| 1530 | * Note that this function can be called in any context, or so says |
| 1531 | * usb_hcd_unlink_urb() |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1532 | */ |
| 1533 | int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) |
| 1534 | { |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1535 | unsigned long flags; |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1536 | int ret, i; |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1537 | u32 temp; |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1538 | struct xhci_hcd *xhci; |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1539 | struct urb_priv *urb_priv; |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1540 | struct xhci_td *td; |
| 1541 | unsigned int ep_index; |
| 1542 | struct xhci_ring *ep_ring; |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 1543 | struct xhci_virt_ep *ep; |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 1544 | struct xhci_command *command; |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1545 | |
| 1546 | xhci = hcd_to_xhci(hcd); |
| 1547 | spin_lock_irqsave(&xhci->lock, flags); |
| 1548 | /* Make sure the URB hasn't completed or been unlinked already */ |
| 1549 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); |
| 1550 | if (ret || !urb->hcpriv) |
| 1551 | goto done; |
Xenia Ragiadakou | b0ba972 | 2013-11-15 05:34:06 +0200 | [diff] [blame] | 1552 | temp = readl(&xhci->op_regs->status); |
Sarah Sharp | c6cc27c | 2011-03-11 10:20:58 -0800 | [diff] [blame] | 1553 | if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) { |
Xenia Ragiadakou | aa50b29 | 2013-08-14 06:33:54 +0300 | [diff] [blame] | 1554 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
| 1555 | "HW died, freeing TD."); |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1556 | urb_priv = urb->hcpriv; |
Mathias Nyman | 5c82171 | 2016-01-26 17:50:12 +0200 | [diff] [blame] | 1557 | for (i = urb_priv->td_cnt; |
| 1558 | i < urb_priv->length && xhci->devs[urb->dev->slot_id]; |
| 1559 | i++) { |
Sarah Sharp | 585df1d | 2011-08-02 15:43:40 -0700 | [diff] [blame] | 1560 | td = urb_priv->td[i]; |
| 1561 | if (!list_empty(&td->td_list)) |
| 1562 | list_del_init(&td->td_list); |
| 1563 | if (!list_empty(&td->cancelled_td_list)) |
| 1564 | list_del_init(&td->cancelled_td_list); |
| 1565 | } |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1566 | |
| 1567 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
| 1568 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | 214f76f | 2010-10-26 11:22:02 -0700 | [diff] [blame] | 1569 | usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 1570 | xhci_urb_free_priv(urb_priv); |
Sarah Sharp | e34b2fb | 2009-09-28 17:21:37 -0700 | [diff] [blame] | 1571 | return ret; |
| 1572 | } |
Sarah Sharp | 7bd89b4 | 2011-07-01 13:35:40 -0700 | [diff] [blame] | 1573 | if ((xhci->xhc_state & XHCI_STATE_DYING) || |
| 1574 | (xhci->xhc_state & XHCI_STATE_HALTED)) { |
Xenia Ragiadakou | aa50b29 | 2013-08-14 06:33:54 +0300 | [diff] [blame] | 1575 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
| 1576 | "Ep 0x%x: URB %p to be canceled on " |
| 1577 | "non-responsive xHCI host.", |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1578 | urb->ep->desc.bEndpointAddress, urb); |
| 1579 | /* Let the stop endpoint command watchdog timer (which set this |
| 1580 | * state) finish cleaning up the endpoint TD lists. We must |
| 1581 | * have caught it in the middle of dropping a lock and giving |
| 1582 | * back an URB. |
| 1583 | */ |
| 1584 | goto done; |
| 1585 | } |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1586 | |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1587 | ep_index = xhci_get_endpoint_index(&urb->ep->desc); |
Sarah Sharp | 63a0d9a | 2009-09-04 10:53:09 -0700 | [diff] [blame] | 1588 | ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; |
Sarah Sharp | e9df17e | 2010-04-02 15:34:43 -0700 | [diff] [blame] | 1589 | ep_ring = xhci_urb_to_transfer_ring(xhci, urb); |
| 1590 | if (!ep_ring) { |
| 1591 | ret = -EINVAL; |
| 1592 | goto done; |
| 1593 | } |
| 1594 | |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1595 | urb_priv = urb->hcpriv; |
Sarah Sharp | 79688ac | 2011-12-19 16:56:04 -0800 | [diff] [blame] | 1596 | i = urb_priv->td_cnt; |
| 1597 | if (i < urb_priv->length) |
Xenia Ragiadakou | aa50b29 | 2013-08-14 06:33:54 +0300 | [diff] [blame] | 1598 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
| 1599 | "Cancel URB %p, dev %s, ep 0x%x, " |
| 1600 | "starting at offset 0x%llx", |
Sarah Sharp | 79688ac | 2011-12-19 16:56:04 -0800 | [diff] [blame] | 1601 | urb, urb->dev->devpath, |
| 1602 | urb->ep->desc.bEndpointAddress, |
| 1603 | (unsigned long long) xhci_trb_virt_to_dma( |
| 1604 | urb_priv->td[i]->start_seg, |
| 1605 | urb_priv->td[i]->first_trb)); |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1606 | |
Sarah Sharp | 79688ac | 2011-12-19 16:56:04 -0800 | [diff] [blame] | 1607 | for (; i < urb_priv->length; i++) { |
Andiry Xu | 8e51adc | 2010-07-22 15:23:31 -0700 | [diff] [blame] | 1608 | td = urb_priv->td[i]; |
| 1609 | list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); |
| 1610 | } |
| 1611 | |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1612 | /* Queue a stop endpoint command, but only if this is |
| 1613 | * the first cancellation to be handled. |
| 1614 | */ |
Sarah Sharp | 678539c | 2009-10-27 10:55:52 -0700 | [diff] [blame] | 1615 | if (!(ep->ep_state & EP_HALT_PENDING)) { |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 1616 | command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); |
Hans de Goede | a0ee619 | 2014-07-25 22:01:21 +0200 | [diff] [blame] | 1617 | if (!command) { |
| 1618 | ret = -ENOMEM; |
| 1619 | goto done; |
| 1620 | } |
Sarah Sharp | 678539c | 2009-10-27 10:55:52 -0700 | [diff] [blame] | 1621 | ep->ep_state |= EP_HALT_PENDING; |
Sarah Sharp | 6f5165c | 2009-10-27 10:57:01 -0700 | [diff] [blame] | 1622 | ep->stop_cmds_pending++; |
| 1623 | ep->stop_cmd_timer.expires = jiffies + |
| 1624 | XHCI_STOP_EP_CMD_TIMEOUT * HZ; |
| 1625 | add_timer(&ep->stop_cmd_timer); |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 1626 | xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, |
| 1627 | ep_index, 0); |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 1628 | xhci_ring_cmd_db(xhci); |
Sarah Sharp | ae63674 | 2009-04-29 19:02:31 -0700 | [diff] [blame] | 1629 | } |
| 1630 | done: |
| 1631 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 1632 | return ret; |
Sarah Sharp | d0e96f5a | 2009-04-27 19:58:01 -0700 | [diff] [blame] | 1633 | } |
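
/*
 * Caller sketch (hypothetical name, illustration only): class drivers reach
 * this function through the USB core, e.g. from a transfer-timeout path.
 */
static int example_cancel_urb(struct urb *urb)
{
	/* Asynchronous unlink; the core invokes xhci_urb_dequeue() for us */
	return usb_unlink_urb(urb);
}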
| 1634 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1635 | /* Drop an endpoint from a new bandwidth configuration for this device. |
| 1636 | * Only one call to this function is allowed per endpoint before |
| 1637 | * check_bandwidth() or reset_bandwidth() is called. |
| 1638 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will |
| 1639 | * add the endpoint to the schedule with possibly new parameters denoted by a |
| 1640 | * different endpoint descriptor in usb_host_endpoint. |
| 1641 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is |
| 1642 | * not allowed. |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 1643 | * |
| 1644 | * The USB core will not allow URBs to be queued to an endpoint that is being |
| 1645 | * disabled, so there's no need for mutual exclusion to protect |
| 1646 | * the xhci->devs[slot_id] structure. |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1647 | */ |
| 1648 | int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
| 1649 | struct usb_host_endpoint *ep) |
| 1650 | { |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1651 | struct xhci_hcd *xhci; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1652 | struct xhci_container_ctx *in_ctx, *out_ctx; |
| 1653 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1654 | unsigned int ep_index; |
| 1655 | struct xhci_ep_ctx *ep_ctx; |
| 1656 | u32 drop_flag; |
Julius Werner | d675913 | 2014-06-24 17:14:42 +0300 | [diff] [blame] | 1657 | u32 new_add_flags, new_drop_flags; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1658 | int ret; |
| 1659 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1660 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1661 | if (ret <= 0) |
| 1662 | return ret; |
| 1663 | xhci = hcd_to_xhci(hcd); |
Sarah Sharp | fe6c6c1 | 2011-05-23 16:41:17 -0700 | [diff] [blame] | 1664 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1665 | return -ENODEV; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1666 | |
Sarah Sharp | fe6c6c1 | 2011-05-23 16:41:17 -0700 | [diff] [blame] | 1667 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1668 | drop_flag = xhci_get_endpoint_flag(&ep->desc); |
| 1669 | if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { |
| 1670 | xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", |
| 1671 | __func__, drop_flag); |
| 1672 | return 0; |
| 1673 | } |
| 1674 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1675 | in_ctx = xhci->devs[udev->slot_id]->in_ctx; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1676 | out_ctx = xhci->devs[udev->slot_id]->out_ctx; |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 1677 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 1678 | if (!ctrl_ctx) { |
| 1679 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
| 1680 | __func__); |
| 1681 | return 0; |
| 1682 | } |
| 1683 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1684 | ep_index = xhci_get_endpoint_index(&ep->desc); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1685 | ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1686 | /* If the HC already knows the endpoint is disabled, |
| 1687 | * or the HCD has noted it is disabled, ignore this request |
| 1688 | */ |
Matt Evans | f5960b6 | 2011-06-01 10:22:55 +1000 | [diff] [blame] | 1689 | if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) == |
| 1690 | cpu_to_le32(EP_STATE_DISABLED)) || |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1691 | le32_to_cpu(ctrl_ctx->drop_flags) & |
| 1692 | xhci_get_endpoint_flag(&ep->desc)) { |
Hans de Goede | a613413 | 2015-01-16 17:54:02 +0200 | [diff] [blame] | 1693 | /* Do not warn when called after a usb_device_reset */ |
| 1694 | if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) |
| 1695 | xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", |
| 1696 | __func__, ep); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1697 | return 0; |
| 1698 | } |
| 1699 | |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1700 | ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); |
| 1701 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1702 | |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1703 | ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); |
| 1704 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1705 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1706 | xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); |
| 1707 | |
Chunfeng Yun | 0cbd4b3 | 2015-11-24 13:09:55 +0200 | [diff] [blame] | 1708 | if (xhci->quirks & XHCI_MTK_HOST) |
| 1709 | xhci_mtk_drop_ep_quirk(hcd, udev, ep); |
| 1710 | |
Julius Werner | d675913 | 2014-06-24 17:14:42 +0300 | [diff] [blame] | 1711 | xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1712 | (unsigned int) ep->desc.bEndpointAddress, |
| 1713 | udev->slot_id, |
| 1714 | (unsigned int) new_drop_flags, |
Julius Werner | d675913 | 2014-06-24 17:14:42 +0300 | [diff] [blame] | 1715 | (unsigned int) new_add_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1716 | return 0; |
| 1717 | } |
| 1718 | |
| 1719 | /* Add an endpoint to a new possible bandwidth configuration for this device. |
| 1720 | * Only one call to this function is allowed per endpoint before |
| 1721 | * check_bandwidth() or reset_bandwidth() is called. |
| 1722 | * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will |
| 1723 | * add the endpoint to the schedule with possibly new parameters denoted by a |
| 1724 | * different endpoint descriptor in usb_host_endpoint. |
| 1725 | * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is |
| 1726 | * not allowed. |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 1727 | * |
| 1728 | * The USB core will not allow URBs to be queued to an endpoint until the |
| 1729 | * configuration or alt setting is installed in the device, so there's no need |
| 1730 | * for mutual exclusion to protect the xhci->devs[slot_id] structure. |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1731 | */ |
| 1732 | int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, |
| 1733 | struct usb_host_endpoint *ep) |
| 1734 | { |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1735 | struct xhci_hcd *xhci; |
Lin Wang | 92c9691 | 2015-01-09 16:06:27 +0200 | [diff] [blame] | 1736 | struct xhci_container_ctx *in_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1737 | unsigned int ep_index; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1738 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1739 | u32 added_ctxs; |
Julius Werner | d675913 | 2014-06-24 17:14:42 +0300 | [diff] [blame] | 1740 | u32 new_add_flags, new_drop_flags; |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1741 | struct xhci_virt_device *virt_dev; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1742 | int ret = 0; |
| 1743 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 1744 | ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 1745 | if (ret <= 0) { |
| 1746 | /* So we won't queue a reset ep command for a root hub */ |
| 1747 | ep->hcpriv = NULL; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1748 | return ret; |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 1749 | } |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1750 | xhci = hcd_to_xhci(hcd); |
Sarah Sharp | fe6c6c1 | 2011-05-23 16:41:17 -0700 | [diff] [blame] | 1751 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 1752 | return -ENODEV; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1753 | |
| 1754 | added_ctxs = xhci_get_endpoint_flag(&ep->desc); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1755 | if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { |
| 1756 | /* FIXME when we have to issue an evaluate endpoint command to |
| 1757 | * deal with ep0 max packet size changing once we get the |
| 1758 | * descriptors |
| 1759 | */ |
| 1760 | xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", |
| 1761 | __func__, added_ctxs); |
| 1762 | return 0; |
| 1763 | } |
| 1764 | |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1765 | virt_dev = xhci->devs[udev->slot_id]; |
| 1766 | in_ctx = virt_dev->in_ctx; |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 1767 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 1768 | if (!ctrl_ctx) { |
| 1769 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
| 1770 | __func__); |
| 1771 | return 0; |
| 1772 | } |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1773 | |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 1774 | ep_index = xhci_get_endpoint_index(&ep->desc); |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1775 | /* If this endpoint is already in use, and the upper layers are trying |
| 1776 | * to add it again without dropping it, reject the addition. |
| 1777 | */ |
| 1778 | if (virt_dev->eps[ep_index].ring && |
Lin Wang | 92c9691 | 2015-01-09 16:06:27 +0200 | [diff] [blame] | 1779 | !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) { |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1780 | xhci_warn(xhci, "Trying to add endpoint 0x%x " |
| 1781 | "without dropping it.\n", |
| 1782 | (unsigned int) ep->desc.bEndpointAddress); |
| 1783 | return -EINVAL; |
| 1784 | } |
| 1785 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1786 | /* If the HCD has already noted the endpoint is enabled, |
| 1787 | * ignore this request. |
| 1788 | */ |
Lin Wang | 92c9691 | 2015-01-09 16:06:27 +0200 | [diff] [blame] | 1789 | if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) { |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 1790 | xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", |
| 1791 | __func__, ep); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1792 | return 0; |
| 1793 | } |
| 1794 | |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 1795 | /* |
| 1796 | * Configuration and alternate setting changes must be done in |
| 1797 | * process context, not interrupt context (or so documentation |
| 1798 | * for usb_set_interface() and usb_set_configuration() claims). |
| 1799 | */ |
Sarah Sharp | fa75ac3 | 2011-06-05 23:10:04 -0700 | [diff] [blame] | 1800 | if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) { |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1801 | dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", |
| 1802 | __func__, ep->desc.bEndpointAddress); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1803 | return -ENOMEM; |
| 1804 | } |
| 1805 | |
Chunfeng Yun | 0cbd4b3 | 2015-11-24 13:09:55 +0200 | [diff] [blame] | 1806 | if (xhci->quirks & XHCI_MTK_HOST) { |
| 1807 | ret = xhci_mtk_add_ep_quirk(hcd, udev, ep); |
| 1808 | if (ret < 0) { |
| 1809 | xhci_free_or_cache_endpoint_ring(xhci, |
| 1810 | virt_dev, ep_index); |
| 1811 | return ret; |
| 1812 | } |
| 1813 | } |
| 1814 | |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1815 | ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); |
| 1816 | new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1817 | |
| 1818 | /* If xhci_endpoint_disable() was called for this endpoint, but the |
| 1819 | * xHC hasn't been notified yet through the check_bandwidth() call, |
| 1820 | * this re-adds a new state for the endpoint from the new endpoint |
| 1821 | * descriptors. We must drop and re-add this endpoint, so we leave the |
| 1822 | * drop flags alone. |
| 1823 | */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1824 | new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1825 | |
Sarah Sharp | a1587d9 | 2009-07-27 12:03:15 -0700 | [diff] [blame] | 1826 | /* Store the usb_device pointer for later use */ |
| 1827 | ep->hcpriv = udev; |
| 1828 | |
Julius Werner | d675913 | 2014-06-24 17:14:42 +0300 | [diff] [blame] | 1829 | xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1830 | (unsigned int) ep->desc.bEndpointAddress, |
| 1831 | udev->slot_id, |
| 1832 | (unsigned int) new_drop_flags, |
Julius Werner | d675913 | 2014-06-24 17:14:42 +0300 | [diff] [blame] | 1833 | (unsigned int) new_add_flags); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1834 | return 0; |
| 1835 | } |
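/*
 * Call-path sketch (illustrative pseudocode, not code from the USB
 * core): xhci_add_endpoint() only stages flags in the input context.
 * The core batches endpoint changes and commits them with a single
 * check_bandwidth() call, falling back to reset_bandwidth() on error:
 *
 *	hcd->driver->drop_endpoint(hcd, udev, old_ep);	// per endpoint
 *	hcd->driver->add_endpoint(hcd, udev, new_ep);	// per endpoint
 *	ret = hcd->driver->check_bandwidth(hcd, udev);	// one xHC command
 *	if (ret)
 *		hcd->driver->reset_bandwidth(hcd, udev);
 */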
| 1836 | |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1837 | static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1838 | { |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1839 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1840 | struct xhci_ep_ctx *ep_ctx; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1841 | struct xhci_slot_ctx *slot_ctx; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1842 | int i; |
| 1843 | |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 1844 | ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 1845 | if (!ctrl_ctx) { |
| 1846 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
| 1847 | __func__); |
| 1848 | return; |
| 1849 | } |
| 1850 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1851 | /* When a device's add flag and drop flag are zero, any subsequent |
| 1852 | * configure endpoint command will leave that endpoint's state |
| 1853 | * untouched. Make sure we don't leave any old state in the input |
| 1854 | * endpoint contexts. |
| 1855 | */ |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1856 | ctrl_ctx->drop_flags = 0; |
| 1857 | ctrl_ctx->add_flags = 0; |
| 1858 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1859 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1860 | /* Endpoint 0 is always valid */ |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 1861 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1862 | for (i = 1; i < 31; ++i) { |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 1863 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1864 | ep_ctx->ep_info = 0; |
| 1865 | ep_ctx->ep_info2 = 0; |
Sarah Sharp | 8e595a5 | 2009-07-27 12:03:31 -0700 | [diff] [blame] | 1866 | ep_ctx->deq = 0; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 1867 | ep_ctx->tx_info = 0; |
| 1868 | } |
| 1869 | } |
| 1870 | |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1871 | static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, |
Sarah Sharp | 00161f7 | 2011-04-28 12:23:23 -0700 | [diff] [blame] | 1872 | struct usb_device *udev, u32 *cmd_status) |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1873 | { |
| 1874 | int ret; |
| 1875 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1876 | switch (*cmd_status) { |
Mathias Nyman | c311e39 | 2014-05-08 19:26:03 +0300 | [diff] [blame] | 1877 | case COMP_CMD_ABORT: |
| 1878 | case COMP_CMD_STOP: |
| 1879 | xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); |
| 1880 | ret = -ETIME; |
| 1881 | break; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1882 | case COMP_ENOMEM: |
Oliver Neukum | 288c0f4 | 2014-06-02 15:25:17 +0200 | [diff] [blame] | 1883 | dev_warn(&udev->dev, |
| 1884 | "Not enough host controller resources for new device state.\n"); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1885 | ret = -ENOMEM; |
| 1886 | /* FIXME: can we allocate more resources for the HC? */ |
| 1887 | break; |
| 1888 | case COMP_BW_ERR: |
Hans de Goede | 71d8572 | 2012-01-04 23:29:18 +0100 | [diff] [blame] | 1889 | case COMP_2ND_BW_ERR: |
Oliver Neukum | 288c0f4 | 2014-06-02 15:25:17 +0200 | [diff] [blame] | 1890 | dev_warn(&udev->dev, |
| 1891 | "Not enough bandwidth for new device state.\n"); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1892 | ret = -ENOSPC; |
| 1893 | /* FIXME: can we go back to the old state? */ |
| 1894 | break; |
| 1895 | case COMP_TRB_ERR: |
| 1896 | /* the HCD set up something wrong */ |
| 1897 | dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " |
| 1898 | "add flag = 1, " |
| 1899 | "and endpoint is not disabled.\n"); |
| 1900 | ret = -EINVAL; |
| 1901 | break; |
Alex He | f6ba6fe | 2011-06-08 18:34:06 +0800 | [diff] [blame] | 1902 | case COMP_DEV_ERR: |
Oliver Neukum | 288c0f4 | 2014-06-02 15:25:17 +0200 | [diff] [blame] | 1903 | dev_warn(&udev->dev, |
| 1904 | "ERROR: Incompatible device for endpoint configure command.\n"); |
Alex He | f6ba6fe | 2011-06-08 18:34:06 +0800 | [diff] [blame] | 1905 | ret = -ENODEV; |
| 1906 | break; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1907 | case COMP_SUCCESS: |
Xenia Ragiadakou | 3a7fa5b | 2013-07-31 07:35:27 +0300 | [diff] [blame] | 1908 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
| 1909 | "Successful Endpoint Configure command"); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1910 | ret = 0; |
| 1911 | break; |
| 1912 | default: |
Oliver Neukum | 288c0f4 | 2014-06-02 15:25:17 +0200 | [diff] [blame] | 1913 | xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", |
| 1914 | *cmd_status); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1915 | ret = -EINVAL; |
| 1916 | break; |
| 1917 | } |
| 1918 | return ret; |
| 1919 | } |
| 1920 | |
| 1921 | static int xhci_evaluate_context_result(struct xhci_hcd *xhci, |
Sarah Sharp | 00161f7 | 2011-04-28 12:23:23 -0700 | [diff] [blame] | 1922 | struct usb_device *udev, u32 *cmd_status) |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1923 | { |
| 1924 | int ret; |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1925 | struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1926 | |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 1927 | switch (*cmd_status) { |
Mathias Nyman | c311e39 | 2014-05-08 19:26:03 +0300 | [diff] [blame] | 1928 | case COMP_CMD_ABORT: |
| 1929 | case COMP_CMD_STOP: |
| 1930 | xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); |
| 1931 | ret = -ETIME; |
| 1932 | break; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1933 | case COMP_EINVAL: |
Oliver Neukum | 288c0f4 | 2014-06-02 15:25:17 +0200 | [diff] [blame] | 1934 | dev_warn(&udev->dev, |
| 1935 | 			"WARN: xHCI driver set up an invalid evaluate context command.\n"); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1936 | ret = -EINVAL; |
| 1937 | break; |
| 1938 | case COMP_EBADSLT: |
Oliver Neukum | 288c0f4 | 2014-06-02 15:25:17 +0200 | [diff] [blame] | 1939 | dev_warn(&udev->dev, |
| 1940 | "WARN: slot not enabled for evaluate context command.\n"); |
Sarah Sharp | b803134 | 2012-10-16 13:26:22 -0700 | [diff] [blame] | 1941 | ret = -EINVAL; |
| 1942 | break; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1943 | case COMP_CTX_STATE: |
Oliver Neukum | 288c0f4 | 2014-06-02 15:25:17 +0200 | [diff] [blame] | 1944 | dev_warn(&udev->dev, |
| 1945 | "WARN: invalid context state for evaluate context command.\n"); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1946 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); |
| 1947 | ret = -EINVAL; |
| 1948 | break; |
Alex He | f6ba6fe | 2011-06-08 18:34:06 +0800 | [diff] [blame] | 1949 | case COMP_DEV_ERR: |
Oliver Neukum | 288c0f4 | 2014-06-02 15:25:17 +0200 | [diff] [blame] | 1950 | dev_warn(&udev->dev, |
| 1951 | "ERROR: Incompatible device for evaluate context command.\n"); |
Alex He | f6ba6fe | 2011-06-08 18:34:06 +0800 | [diff] [blame] | 1952 | ret = -ENODEV; |
| 1953 | break; |
Alex He | 1bb73a8 | 2011-05-05 18:14:12 +0800 | [diff] [blame] | 1954 | case COMP_MEL_ERR: |
| 1955 | /* Max Exit Latency too large error */ |
| 1956 | dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); |
| 1957 | ret = -EINVAL; |
| 1958 | break; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1959 | case COMP_SUCCESS: |
Xenia Ragiadakou | 3a7fa5b | 2013-07-31 07:35:27 +0300 | [diff] [blame] | 1960 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
| 1961 | "Successful evaluate context command"); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1962 | ret = 0; |
| 1963 | break; |
| 1964 | default: |
Oliver Neukum | 288c0f4 | 2014-06-02 15:25:17 +0200 | [diff] [blame] | 1965 | xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", |
| 1966 | *cmd_status); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 1967 | ret = -EINVAL; |
| 1968 | break; |
| 1969 | } |
| 1970 | return ret; |
| 1971 | } |
| 1972 | |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 1973 | static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 1974 | struct xhci_input_control_ctx *ctrl_ctx) |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 1975 | { |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 1976 | u32 valid_add_flags; |
| 1977 | u32 valid_drop_flags; |
| 1978 | |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 1979 | /* Ignore the slot flag (bit 0), and the default control endpoint flag |
| 1980 | * (bit 1). The default control endpoint is added during the Address |
| 1981 | * Device command and is never removed until the slot is disabled. |
| 1982 | */ |
Xenia Ragiadakou | ef73400 | 2013-09-09 21:03:06 +0300 | [diff] [blame] | 1983 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
| 1984 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 1985 | |
| 1986 | /* Use hweight32 to count the number of ones in the add flags, or |
| 1987 | * number of endpoints added. Don't count endpoints that are changed |
| 1988 | * (both added and dropped). |
| 1989 | */ |
| 1990 | return hweight32(valid_add_flags) - |
| 1991 | hweight32(valid_add_flags & valid_drop_flags); |
| 1992 | } |
| 1993 | |
| 1994 | static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 1995 | struct xhci_input_control_ctx *ctrl_ctx) |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 1996 | { |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 1997 | u32 valid_add_flags; |
| 1998 | u32 valid_drop_flags; |
| 1999 | |
Xenia Ragiadakou | 78d1ff0 | 2013-09-09 21:03:07 +0300 | [diff] [blame] | 2000 | valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; |
| 2001 | valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2002 | |
| 2003 | return hweight32(valid_drop_flags) - |
| 2004 | hweight32(valid_add_flags & valid_drop_flags); |
| 2005 | } |
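/*
 * Worked example for the two counters above (illustrative flag values):
 * with add_flags = 0x3c, endpoint contexts 2-5 are added; with
 * drop_flags = 0x0c, contexts 2 and 3 are dropped. After shifting out
 * the slot and EP0 bits, valid_add_flags = 0xf (hweight32 = 4) and
 * valid_drop_flags = 0x3 (hweight32 = 2). Contexts 2 and 3 are both
 * added and dropped, i.e. changed, so:
 *	new endpoints     = 4 - 2 = 2	(xhci_count_num_new_endpoints)
 *	dropped endpoints = 2 - 2 = 0	(xhci_count_num_dropped_endpoints)
 */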
| 2006 | |
| 2007 | /* |
| 2008 | * We need to reserve the new number of endpoints before the configure endpoint |
| 2009 | * command completes. We can't subtract the dropped endpoints from the number |
| 2010 | * of active endpoints until the command completes because we can oversubscribe |
| 2011 | * the host in this case: |
| 2012 | * |
| 2013 | * - the first configure endpoint command drops more endpoints than it adds |
| 2014 | * - a second configure endpoint command that adds more endpoints is queued |
| 2015 | * - the first configure endpoint command fails, so the config is unchanged |
| 2016 |  * - the second command may succeed even though there aren't enough resources |
| 2017 | * |
| 2018 | * Must be called with xhci->lock held. |
| 2019 | */ |
| 2020 | static int xhci_reserve_host_resources(struct xhci_hcd *xhci, |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2021 | struct xhci_input_control_ctx *ctrl_ctx) |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2022 | { |
| 2023 | u32 added_eps; |
| 2024 | |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2025 | added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2026 | if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { |
Xenia Ragiadakou | 4bdfe4c | 2013-08-06 07:52:45 +0300 | [diff] [blame] | 2027 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
| 2028 | "Not enough ep ctxs: " |
| 2029 | "%u active, need to add %u, limit is %u.", |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2030 | xhci->num_active_eps, added_eps, |
| 2031 | xhci->limit_active_eps); |
| 2032 | return -ENOMEM; |
| 2033 | } |
| 2034 | xhci->num_active_eps += added_eps; |
Xenia Ragiadakou | 4bdfe4c | 2013-08-06 07:52:45 +0300 | [diff] [blame] | 2035 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
| 2036 | "Adding %u ep ctxs, %u now active.", added_eps, |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2037 | xhci->num_active_eps); |
| 2038 | return 0; |
| 2039 | } |
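/*
 * Numeric illustration of the oversubscription scenario above
 * (hypothetical limits): limit_active_eps = 10, num_active_eps = 8.
 * Command A drops 5 endpoints and adds 1; command B adds 3 more.
 * Reserving only the additions gives 8 + 1 = 9 for A and 9 + 3 = 12
 * for B, so B is correctly rejected. Subtracting A's drops up front
 * would let B through with 8 - 5 + 1 + 3 = 7; if A then fails, the
 * device keeps its original 8 endpoints and a successful B pushes the
 * host to 8 + 3 = 11 > 10 contexts.
 */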
| 2040 | |
| 2041 | /* |
| 2042 |  * The configure endpoint command failed in the xHC for some other reason, so we |
| 2043 |  * need to revert the resources that the failed configuration would have used. |
| 2044 | * |
| 2045 | * Must be called with xhci->lock held. |
| 2046 | */ |
| 2047 | static void xhci_free_host_resources(struct xhci_hcd *xhci, |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2048 | struct xhci_input_control_ctx *ctrl_ctx) |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2049 | { |
| 2050 | u32 num_failed_eps; |
| 2051 | |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2052 | num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2053 | xhci->num_active_eps -= num_failed_eps; |
Xenia Ragiadakou | 4bdfe4c | 2013-08-06 07:52:45 +0300 | [diff] [blame] | 2054 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
| 2055 | "Removing %u failed ep ctxs, %u now active.", |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2056 | num_failed_eps, |
| 2057 | xhci->num_active_eps); |
| 2058 | } |
| 2059 | |
| 2060 | /* |
| 2061 | * Now that the command has completed, clean up the active endpoint count by |
| 2062 | * subtracting out the endpoints that were dropped (but not changed). |
| 2063 | * |
| 2064 | * Must be called with xhci->lock held. |
| 2065 | */ |
| 2066 | static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2067 | struct xhci_input_control_ctx *ctrl_ctx) |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2068 | { |
| 2069 | u32 num_dropped_eps; |
| 2070 | |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2071 | num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2072 | xhci->num_active_eps -= num_dropped_eps; |
| 2073 | if (num_dropped_eps) |
Xenia Ragiadakou | 4bdfe4c | 2013-08-06 07:52:45 +0300 | [diff] [blame] | 2074 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
| 2075 | "Removing %u dropped ep ctxs, %u now active.", |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2076 | num_dropped_eps, |
| 2077 | xhci->num_active_eps); |
| 2078 | } |
| 2079 | |
Felipe Balbi | ed384bd | 2012-08-07 14:10:03 +0300 | [diff] [blame] | 2080 | static unsigned int xhci_get_block_size(struct usb_device *udev) |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2081 | { |
| 2082 | switch (udev->speed) { |
| 2083 | case USB_SPEED_LOW: |
| 2084 | case USB_SPEED_FULL: |
| 2085 | return FS_BLOCK; |
| 2086 | case USB_SPEED_HIGH: |
| 2087 | return HS_BLOCK; |
| 2088 | case USB_SPEED_SUPER: |
| 2089 | return SS_BLOCK; |
| 2090 | case USB_SPEED_UNKNOWN: |
| 2091 | case USB_SPEED_WIRELESS: |
| 2092 | default: |
| 2093 | /* Should never happen */ |
| 2094 | return 1; |
| 2095 | } |
| 2096 | } |
| 2097 | |
Felipe Balbi | ed384bd | 2012-08-07 14:10:03 +0300 | [diff] [blame] | 2098 | static unsigned int |
| 2099 | xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2100 | { |
| 2101 | if (interval_bw->overhead[LS_OVERHEAD_TYPE]) |
| 2102 | return LS_OVERHEAD; |
| 2103 | if (interval_bw->overhead[FS_OVERHEAD_TYPE]) |
| 2104 | return FS_OVERHEAD; |
| 2105 | return HS_OVERHEAD; |
| 2106 | } |
| 2107 | |
| 2108 | /* If we are changing a LS/FS device under a HS hub, |
| 2109 | * make sure (if we are activating a new TT) that the HS bus has enough |
| 2110 | * bandwidth for this new TT. |
| 2111 | */ |
| 2112 | static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, |
| 2113 | struct xhci_virt_device *virt_dev, |
| 2114 | int old_active_eps) |
| 2115 | { |
| 2116 | struct xhci_interval_bw_table *bw_table; |
| 2117 | struct xhci_tt_bw_info *tt_info; |
| 2118 | |
| 2119 | /* Find the bandwidth table for the root port this TT is attached to. */ |
| 2120 | bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; |
| 2121 | tt_info = virt_dev->tt_info; |
| 2122 | /* If this TT already had active endpoints, the bandwidth for this TT |
| 2123 | * has already been added. Removing all periodic endpoints (and thus |
| 2124 | 	 * making the TT inactive) will only decrease the bandwidth used. |
| 2125 | */ |
| 2126 | if (old_active_eps) |
| 2127 | return 0; |
| 2128 | if (old_active_eps == 0 && tt_info->active_eps != 0) { |
| 2129 | if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) |
| 2130 | return -ENOMEM; |
| 2131 | return 0; |
| 2132 | } |
| 2133 | /* Not sure why we would have no new active endpoints... |
| 2134 | * |
| 2135 | * Maybe because of an Evaluate Context change for a hub update or a |
| 2136 | * control endpoint 0 max packet size change? |
| 2137 | * FIXME: skip the bandwidth calculation in that case. |
| 2138 | */ |
| 2139 | return 0; |
| 2140 | } |
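/*
 * Example: adding the first periodic endpoint of a LS/FS device behind
 * a previously idle TT charges TT_HS_OVERHEAD blocks against the root
 * port's high-speed budget. If bw_used is already within
 * TT_HS_OVERHEAD of HS_BW_LIMIT, the check above returns -ENOMEM and
 * the new TT cannot be activated.
 */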
| 2141 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 2142 | static int xhci_check_ss_bw(struct xhci_hcd *xhci, |
| 2143 | struct xhci_virt_device *virt_dev) |
| 2144 | { |
| 2145 | unsigned int bw_reserved; |
| 2146 | |
| 2147 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); |
| 2148 | if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) |
| 2149 | return -ENOMEM; |
| 2150 | |
| 2151 | bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); |
| 2152 | if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) |
| 2153 | return -ENOMEM; |
| 2154 | |
| 2155 | return 0; |
| 2156 | } |
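/*
 * The reservation is a percentage rounded up to whole blocks:
 * bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_IN, 100).
 * With hypothetical values (a 20% reservation and a 1000-block limit;
 * the real constants live in xhci.h), an IN budget above
 * 1000 - 200 = 800 blocks makes the check fail with -ENOMEM.
 */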
| 2157 | |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2158 | /* |
| 2159 | * This algorithm is a very conservative estimate of the worst-case scheduling |
| 2160 | * scenario for any one interval. The hardware dynamically schedules the |
| 2161 | * packets, so we can't tell which microframe could be the limiting factor in |
| 2162 | * the bandwidth scheduling. This only takes into account periodic endpoints. |
| 2163 | * |
| 2164 |  * Obviously, we can't solve an NP-complete problem to find the minimum worst |
| 2165 | * case scenario. Instead, we come up with an estimate that is no less than |
| 2166 | * the worst case bandwidth used for any one microframe, but may be an |
| 2167 | * over-estimate. |
| 2168 | * |
| 2169 | * We walk the requirements for each endpoint by interval, starting with the |
| 2170 | * smallest interval, and place packets in the schedule where there is only one |
| 2171 | * possible way to schedule packets for that interval. In order to simplify |
| 2172 | * this algorithm, we record the largest max packet size for each interval, and |
| 2173 | * assume all packets will be that size. |
| 2174 | * |
| 2175 | * For interval 0, we obviously must schedule all packets for each interval. |
| 2176 | * The bandwidth for interval 0 is just the amount of data to be transmitted |
| 2177 | * (the sum of all max ESIT payload sizes, plus any overhead per packet times |
| 2178 | * the number of packets). |
| 2179 | * |
| 2180 | * For interval 1, we have two possible microframes to schedule those packets |
| 2181 | * in. For this algorithm, if we can schedule the same number of packets for |
| 2182 | * each possible scheduling opportunity (each microframe), we will do so. The |
| 2183 | * remaining number of packets will be saved to be transmitted in the gaps in |
| 2184 | * the next interval's scheduling sequence. |
| 2185 | * |
| 2186 | * As we move those remaining packets to be scheduled with interval 2 packets, |
| 2187 | * we have to double the number of remaining packets to transmit. This is |
| 2188 | * because the intervals are actually powers of 2, and we would be transmitting |
| 2189 | * the previous interval's packets twice in this interval. We also have to be |
| 2190 | * sure that when we look at the largest max packet size for this interval, we |
| 2191 | * also look at the largest max packet size for the remaining packets and take |
| 2192 | * the greater of the two. |
| 2193 | * |
| 2194 | * The algorithm continues to evenly distribute packets in each scheduling |
| 2195 | * opportunity, and push the remaining packets out, until we get to the last |
| 2196 | * interval. Then those packets and their associated overhead are just added |
| 2197 | * to the bandwidth used. |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2198 | */ |
| 2199 | static int xhci_check_bw_table(struct xhci_hcd *xhci, |
| 2200 | struct xhci_virt_device *virt_dev, |
| 2201 | int old_active_eps) |
| 2202 | { |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2203 | unsigned int bw_reserved; |
| 2204 | unsigned int max_bandwidth; |
| 2205 | unsigned int bw_used; |
| 2206 | unsigned int block_size; |
| 2207 | struct xhci_interval_bw_table *bw_table; |
| 2208 | unsigned int packet_size = 0; |
| 2209 | unsigned int overhead = 0; |
| 2210 | unsigned int packets_transmitted = 0; |
| 2211 | unsigned int packets_remaining = 0; |
| 2212 | unsigned int i; |
| 2213 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 2214 | if (virt_dev->udev->speed == USB_SPEED_SUPER) |
| 2215 | return xhci_check_ss_bw(xhci, virt_dev); |
| 2216 | |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2217 | if (virt_dev->udev->speed == USB_SPEED_HIGH) { |
| 2218 | max_bandwidth = HS_BW_LIMIT; |
| 2219 | /* Convert percent of bus BW reserved to blocks reserved */ |
| 2220 | bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); |
| 2221 | } else { |
| 2222 | max_bandwidth = FS_BW_LIMIT; |
| 2223 | bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); |
| 2224 | } |
| 2225 | |
| 2226 | bw_table = virt_dev->bw_table; |
| 2227 | /* We need to translate the max packet size and max ESIT payloads into |
| 2228 | * the units the hardware uses. |
| 2229 | */ |
| 2230 | block_size = xhci_get_block_size(virt_dev->udev); |
| 2231 | |
| 2232 | /* If we are manipulating a LS/FS device under a HS hub, double check |
| 2233 | 	 * that the HS bus has enough bandwidth if we are activating a new TT. |
| 2234 | */ |
| 2235 | if (virt_dev->tt_info) { |
Xenia Ragiadakou | 4bdfe4c | 2013-08-06 07:52:45 +0300 | [diff] [blame] | 2236 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
| 2237 | "Recalculating BW for rootport %u", |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2238 | virt_dev->real_port); |
| 2239 | if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { |
| 2240 | xhci_warn(xhci, "Not enough bandwidth on HS bus for " |
| 2241 | "newly activated TT.\n"); |
| 2242 | return -ENOMEM; |
| 2243 | } |
Xenia Ragiadakou | 4bdfe4c | 2013-08-06 07:52:45 +0300 | [diff] [blame] | 2244 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
| 2245 | "Recalculating BW for TT slot %u port %u", |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2246 | virt_dev->tt_info->slot_id, |
| 2247 | virt_dev->tt_info->ttport); |
| 2248 | } else { |
Xenia Ragiadakou | 4bdfe4c | 2013-08-06 07:52:45 +0300 | [diff] [blame] | 2249 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
| 2250 | "Recalculating BW for rootport %u", |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2251 | virt_dev->real_port); |
| 2252 | } |
| 2253 | |
| 2254 | /* Add in how much bandwidth will be used for interval zero, or the |
| 2255 | * rounded max ESIT payload + number of packets * largest overhead. |
| 2256 | */ |
| 2257 | bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + |
| 2258 | bw_table->interval_bw[0].num_packets * |
| 2259 | xhci_get_largest_overhead(&bw_table->interval_bw[0]); |
| 2260 | |
| 2261 | for (i = 1; i < XHCI_MAX_INTERVAL; i++) { |
| 2262 | unsigned int bw_added; |
| 2263 | unsigned int largest_mps; |
| 2264 | unsigned int interval_overhead; |
| 2265 | |
| 2266 | /* |
| 2267 | * How many packets could we transmit in this interval? |
| 2268 | * If packets didn't fit in the previous interval, we will need |
| 2269 | * to transmit that many packets twice within this interval. |
| 2270 | */ |
| 2271 | packets_remaining = 2 * packets_remaining + |
| 2272 | bw_table->interval_bw[i].num_packets; |
| 2273 | |
| 2274 | /* Find the largest max packet size of this or the previous |
| 2275 | * interval. |
| 2276 | */ |
| 2277 | if (list_empty(&bw_table->interval_bw[i].endpoints)) |
| 2278 | largest_mps = 0; |
| 2279 | else { |
| 2280 | struct xhci_virt_ep *virt_ep; |
| 2281 | struct list_head *ep_entry; |
| 2282 | |
| 2283 | ep_entry = bw_table->interval_bw[i].endpoints.next; |
| 2284 | virt_ep = list_entry(ep_entry, |
| 2285 | struct xhci_virt_ep, bw_endpoint_list); |
| 2286 | /* Convert to blocks, rounding up */ |
| 2287 | largest_mps = DIV_ROUND_UP( |
| 2288 | virt_ep->bw_info.max_packet_size, |
| 2289 | block_size); |
| 2290 | } |
| 2291 | if (largest_mps > packet_size) |
| 2292 | packet_size = largest_mps; |
| 2293 | |
| 2294 | /* Use the larger overhead of this or the previous interval. */ |
| 2295 | interval_overhead = xhci_get_largest_overhead( |
| 2296 | &bw_table->interval_bw[i]); |
| 2297 | if (interval_overhead > overhead) |
| 2298 | overhead = interval_overhead; |
| 2299 | |
| 2300 | /* How many packets can we evenly distribute across |
| 2301 | * (1 << (i + 1)) possible scheduling opportunities? |
| 2302 | */ |
| 2303 | packets_transmitted = packets_remaining >> (i + 1); |
| 2304 | |
| 2305 | /* Add in the bandwidth used for those scheduled packets */ |
| 2306 | bw_added = packets_transmitted * (overhead + packet_size); |
| 2307 | |
| 2308 | /* How many packets do we have remaining to transmit? */ |
| 2309 | packets_remaining = packets_remaining % (1 << (i + 1)); |
| 2310 | |
| 2311 | /* What largest max packet size should those packets have? */ |
| 2312 | /* If we've transmitted all packets, don't carry over the |
| 2313 | * largest packet size. |
| 2314 | */ |
| 2315 | if (packets_remaining == 0) { |
| 2316 | packet_size = 0; |
| 2317 | overhead = 0; |
| 2318 | } else if (packets_transmitted > 0) { |
| 2319 | /* Otherwise if we do have remaining packets, and we've |
| 2320 | * scheduled some packets in this interval, take the |
| 2321 | * largest max packet size from endpoints with this |
| 2322 | * interval. |
| 2323 | */ |
| 2324 | packet_size = largest_mps; |
| 2325 | overhead = interval_overhead; |
| 2326 | } |
| 2327 | /* Otherwise carry over packet_size and overhead from the last |
| 2328 | * time we had a remainder. |
| 2329 | */ |
| 2330 | bw_used += bw_added; |
| 2331 | if (bw_used > max_bandwidth) { |
| 2332 | xhci_warn(xhci, "Not enough bandwidth. " |
| 2333 | "Proposed: %u, Max: %u\n", |
| 2334 | bw_used, max_bandwidth); |
| 2335 | return -ENOMEM; |
| 2336 | } |
| 2337 | } |
| 2338 | /* |
| 2339 | * Ok, we know we have some packets left over after even-handedly |
| 2340 | * scheduling interval 15. We don't know which microframes they will |
| 2341 | * fit into, so we over-schedule and say they will be scheduled every |
| 2342 | * microframe. |
| 2343 | */ |
| 2344 | if (packets_remaining > 0) |
| 2345 | bw_used += overhead + packet_size; |
| 2346 | |
| 2347 | if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { |
| 2348 | unsigned int port_index = virt_dev->real_port - 1; |
| 2349 | |
| 2350 | /* OK, we're manipulating a HS device attached to a |
| 2351 | * root port bandwidth domain. Include the number of active TTs |
| 2352 | * in the bandwidth used. |
| 2353 | */ |
| 2354 | bw_used += TT_HS_OVERHEAD * |
| 2355 | xhci->rh_bw[port_index].num_active_tts; |
| 2356 | } |
| 2357 | |
Xenia Ragiadakou | 4bdfe4c | 2013-08-06 07:52:45 +0300 | [diff] [blame] | 2358 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
| 2359 | "Final bandwidth: %u, Limit: %u, Reserved: %u, " |
| 2360 | "Available: %u " "percent", |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2361 | bw_used, max_bandwidth, bw_reserved, |
| 2362 | (max_bandwidth - bw_used - bw_reserved) * 100 / |
| 2363 | max_bandwidth); |
| 2364 | |
| 2365 | bw_used += bw_reserved; |
| 2366 | if (bw_used > max_bandwidth) { |
| 2367 | xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", |
| 2368 | bw_used, max_bandwidth); |
| 2369 | return -ENOMEM; |
| 2370 | } |
| 2371 | |
| 2372 | bw_table->bw_used = bw_used; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2373 | return 0; |
| 2374 | } |
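/*
 * Mechanical trace of the loop above with made-up numbers (a FS device
 * on a root port, all values in blocks): interval 0 payload rounds to
 * 10, no interval-0 packets; interval 1 has 8 packets with largest
 * mps = 2 and overhead = 1; intervals 2-15 are empty.
 *	i = 1:	packets_remaining = 8
 *		packets_transmitted = 8 >> 2 = 2
 *		bw_added = 2 * (1 + 2) = 6
 *		remainder = 8 % 4 = 0, so packet_size and overhead reset
 *	i = 2..15: nothing queued, nothing carried over
 * Result: bw_used = 10 + 6 = 16, then bw_reserved is added and the sum
 * is checked against FS_BW_LIMIT.
 */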
| 2375 | |
| 2376 | static bool xhci_is_async_ep(unsigned int ep_type) |
| 2377 | { |
| 2378 | return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && |
| 2379 | ep_type != ISOC_IN_EP && |
| 2380 | ep_type != INT_IN_EP); |
| 2381 | } |
| 2382 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 2383 | static bool xhci_is_sync_in_ep(unsigned int ep_type) |
| 2384 | { |
Sarah Sharp | 392a07a | 2012-10-25 13:44:12 -0700 | [diff] [blame] | 2385 | return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 2386 | } |
| 2387 | |
| 2388 | static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) |
| 2389 | { |
| 2390 | unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); |
| 2391 | |
| 2392 | if (ep_bw->ep_interval == 0) |
| 2393 | return SS_OVERHEAD_BURST + |
| 2394 | (ep_bw->mult * ep_bw->num_packets * |
| 2395 | (SS_OVERHEAD + mps)); |
| 2396 | return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * |
| 2397 | (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), |
| 2398 | 1 << ep_bw->ep_interval); |
| 2399 | |
| 2400 | } |
| 2401 | |
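/*
 * Example (assuming SS_BLOCK is 16 bytes; see xhci.h for the real
 * value): max_packet_size = 1024 gives mps = DIV_ROUND_UP(1024, 16) =
 * 64 blocks. With mult = 1, num_packets = 2 and ep_interval = 3, the
 * consumed bandwidth is
 *	DIV_ROUND_UP(2 * (SS_OVERHEAD + 64 + SS_OVERHEAD_BURST), 1 << 3)
 * i.e. the per-service-interval cost spread over 8 intervals.
 */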
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2402 | void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, |
| 2403 | struct xhci_bw_info *ep_bw, |
| 2404 | struct xhci_interval_bw_table *bw_table, |
| 2405 | struct usb_device *udev, |
| 2406 | struct xhci_virt_ep *virt_ep, |
| 2407 | struct xhci_tt_bw_info *tt_info) |
| 2408 | { |
| 2409 | struct xhci_interval_bw *interval_bw; |
| 2410 | int normalized_interval; |
| 2411 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 2412 | if (xhci_is_async_ep(ep_bw->type)) |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2413 | return; |
| 2414 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 2415 | if (udev->speed == USB_SPEED_SUPER) { |
| 2416 | if (xhci_is_sync_in_ep(ep_bw->type)) |
| 2417 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= |
| 2418 | xhci_get_ss_bw_consumed(ep_bw); |
| 2419 | else |
| 2420 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= |
| 2421 | xhci_get_ss_bw_consumed(ep_bw); |
| 2422 | return; |
| 2423 | } |
| 2424 | |
| 2425 | /* SuperSpeed endpoints never get added to intervals in the table, so |
| 2426 | * this check is only valid for HS/FS/LS devices. |
| 2427 | */ |
| 2428 | if (list_empty(&virt_ep->bw_endpoint_list)) |
| 2429 | return; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2430 | /* For LS/FS devices, we need to translate the interval expressed in |
| 2431 | * microframes to frames. |
| 2432 | */ |
| 2433 | if (udev->speed == USB_SPEED_HIGH) |
| 2434 | normalized_interval = ep_bw->ep_interval; |
| 2435 | else |
| 2436 | normalized_interval = ep_bw->ep_interval - 3; |
| 2437 | |
| 2438 | if (normalized_interval == 0) |
| 2439 | bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; |
| 2440 | interval_bw = &bw_table->interval_bw[normalized_interval]; |
| 2441 | interval_bw->num_packets -= ep_bw->num_packets; |
| 2442 | switch (udev->speed) { |
| 2443 | case USB_SPEED_LOW: |
| 2444 | interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; |
| 2445 | break; |
| 2446 | case USB_SPEED_FULL: |
| 2447 | interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; |
| 2448 | break; |
| 2449 | case USB_SPEED_HIGH: |
| 2450 | interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; |
| 2451 | break; |
| 2452 | case USB_SPEED_SUPER: |
| 2453 | case USB_SPEED_UNKNOWN: |
| 2454 | case USB_SPEED_WIRELESS: |
| 2455 | /* Should never happen because only LS/FS/HS endpoints will get |
| 2456 | * added to the endpoint list. |
| 2457 | */ |
| 2458 | return; |
| 2459 | } |
| 2460 | if (tt_info) |
| 2461 | tt_info->active_eps -= 1; |
| 2462 | list_del_init(&virt_ep->bw_endpoint_list); |
| 2463 | } |
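/*
 * Interval translation example: a full-speed interrupt endpoint stored
 * with ep_interval = 5 (2^5 = 32 microframes, i.e. 4 frames) lands in
 * interval_bw[5 - 3] = interval_bw[2], a 2^2 = 4-frame slot in its
 * TT's table.
 */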
| 2464 | |
| 2465 | static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, |
| 2466 | struct xhci_bw_info *ep_bw, |
| 2467 | struct xhci_interval_bw_table *bw_table, |
| 2468 | struct usb_device *udev, |
| 2469 | struct xhci_virt_ep *virt_ep, |
| 2470 | struct xhci_tt_bw_info *tt_info) |
| 2471 | { |
| 2472 | struct xhci_interval_bw *interval_bw; |
| 2473 | struct xhci_virt_ep *smaller_ep; |
| 2474 | int normalized_interval; |
| 2475 | |
| 2476 | if (xhci_is_async_ep(ep_bw->type)) |
| 2477 | return; |
| 2478 | |
Sarah Sharp | 2b69899 | 2011-09-13 16:41:13 -0700 | [diff] [blame] | 2479 | if (udev->speed == USB_SPEED_SUPER) { |
| 2480 | if (xhci_is_sync_in_ep(ep_bw->type)) |
| 2481 | xhci->devs[udev->slot_id]->bw_table->ss_bw_in += |
| 2482 | xhci_get_ss_bw_consumed(ep_bw); |
| 2483 | else |
| 2484 | xhci->devs[udev->slot_id]->bw_table->ss_bw_out += |
| 2485 | xhci_get_ss_bw_consumed(ep_bw); |
| 2486 | return; |
| 2487 | } |
| 2488 | |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2489 | /* For LS/FS devices, we need to translate the interval expressed in |
| 2490 | * microframes to frames. |
| 2491 | */ |
| 2492 | if (udev->speed == USB_SPEED_HIGH) |
| 2493 | normalized_interval = ep_bw->ep_interval; |
| 2494 | else |
| 2495 | normalized_interval = ep_bw->ep_interval - 3; |
| 2496 | |
| 2497 | if (normalized_interval == 0) |
| 2498 | bw_table->interval0_esit_payload += ep_bw->max_esit_payload; |
| 2499 | interval_bw = &bw_table->interval_bw[normalized_interval]; |
| 2500 | interval_bw->num_packets += ep_bw->num_packets; |
| 2501 | switch (udev->speed) { |
| 2502 | case USB_SPEED_LOW: |
| 2503 | interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; |
| 2504 | break; |
| 2505 | case USB_SPEED_FULL: |
| 2506 | interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; |
| 2507 | break; |
| 2508 | case USB_SPEED_HIGH: |
| 2509 | interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; |
| 2510 | break; |
| 2511 | case USB_SPEED_SUPER: |
| 2512 | case USB_SPEED_UNKNOWN: |
| 2513 | case USB_SPEED_WIRELESS: |
| 2514 | /* Should never happen because only LS/FS/HS endpoints will get |
| 2515 | * added to the endpoint list. |
| 2516 | */ |
| 2517 | return; |
| 2518 | } |
| 2519 | |
| 2520 | if (tt_info) |
| 2521 | tt_info->active_eps += 1; |
| 2522 | /* Insert the endpoint into the list, largest max packet size first. */ |
| 2523 | list_for_each_entry(smaller_ep, &interval_bw->endpoints, |
| 2524 | bw_endpoint_list) { |
| 2525 | if (ep_bw->max_packet_size >= |
| 2526 | smaller_ep->bw_info.max_packet_size) { |
| 2527 | /* Add the new ep before the smaller endpoint */ |
| 2528 | list_add_tail(&virt_ep->bw_endpoint_list, |
| 2529 | &smaller_ep->bw_endpoint_list); |
| 2530 | return; |
| 2531 | } |
| 2532 | } |
| 2533 | /* Add the new endpoint at the end of the list. */ |
| 2534 | list_add_tail(&virt_ep->bw_endpoint_list, |
| 2535 | &interval_bw->endpoints); |
| 2536 | } |
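/*
 * Keeping each interval's endpoint list sorted largest-first is what
 * lets xhci_check_bw_table() read just the head of the list
 * (interval_bw[i].endpoints.next) to find that interval's largest max
 * packet size.
 */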
| 2537 | |
| 2538 | void xhci_update_tt_active_eps(struct xhci_hcd *xhci, |
| 2539 | struct xhci_virt_device *virt_dev, |
| 2540 | int old_active_eps) |
| 2541 | { |
| 2542 | struct xhci_root_port_bw_info *rh_bw_info; |
| 2543 | if (!virt_dev->tt_info) |
| 2544 | return; |
| 2545 | |
| 2546 | rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; |
| 2547 | if (old_active_eps == 0 && |
| 2548 | virt_dev->tt_info->active_eps != 0) { |
| 2549 | rh_bw_info->num_active_tts += 1; |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2550 | rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2551 | } else if (old_active_eps != 0 && |
| 2552 | virt_dev->tt_info->active_eps == 0) { |
| 2553 | rh_bw_info->num_active_tts -= 1; |
Sarah Sharp | c29eea6 | 2011-09-02 11:05:52 -0700 | [diff] [blame] | 2554 | rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2555 | } |
| 2556 | } |
| 2557 | |
| 2558 | static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, |
| 2559 | struct xhci_virt_device *virt_dev, |
| 2560 | struct xhci_container_ctx *in_ctx) |
| 2561 | { |
| 2562 | struct xhci_bw_info ep_bw_info[31]; |
| 2563 | int i; |
| 2564 | struct xhci_input_control_ctx *ctrl_ctx; |
| 2565 | int old_active_eps = 0; |
| 2566 | |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2567 | if (virt_dev->tt_info) |
| 2568 | old_active_eps = virt_dev->tt_info->active_eps; |
| 2569 | |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 2570 | ctrl_ctx = xhci_get_input_control_ctx(in_ctx); |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2571 | if (!ctrl_ctx) { |
| 2572 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
| 2573 | __func__); |
| 2574 | return -ENOMEM; |
| 2575 | } |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2576 | |
| 2577 | for (i = 0; i < 31; i++) { |
| 2578 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) |
| 2579 | continue; |
| 2580 | |
| 2581 | /* Make a copy of the BW info in case we need to revert this */ |
| 2582 | memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, |
| 2583 | sizeof(ep_bw_info[i])); |
| 2584 | /* Drop the endpoint from the interval table if the endpoint is |
| 2585 | * being dropped or changed. |
| 2586 | */ |
| 2587 | if (EP_IS_DROPPED(ctrl_ctx, i)) |
| 2588 | xhci_drop_ep_from_interval_table(xhci, |
| 2589 | &virt_dev->eps[i].bw_info, |
| 2590 | virt_dev->bw_table, |
| 2591 | virt_dev->udev, |
| 2592 | &virt_dev->eps[i], |
| 2593 | virt_dev->tt_info); |
| 2594 | } |
| 2595 | /* Overwrite the information stored in the endpoints' bw_info */ |
| 2596 | xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); |
| 2597 | for (i = 0; i < 31; i++) { |
| 2598 | /* Add any changed or added endpoints to the interval table */ |
| 2599 | if (EP_IS_ADDED(ctrl_ctx, i)) |
| 2600 | xhci_add_ep_to_interval_table(xhci, |
| 2601 | &virt_dev->eps[i].bw_info, |
| 2602 | virt_dev->bw_table, |
| 2603 | virt_dev->udev, |
| 2604 | &virt_dev->eps[i], |
| 2605 | virt_dev->tt_info); |
| 2606 | } |
| 2607 | |
| 2608 | if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { |
| 2609 | /* Ok, this fits in the bandwidth we have. |
| 2610 | * Update the number of active TTs. |
| 2611 | */ |
| 2612 | xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); |
| 2613 | return 0; |
| 2614 | } |
| 2615 | |
| 2616 | /* We don't have enough bandwidth for this, revert the stored info. */ |
| 2617 | for (i = 0; i < 31; i++) { |
| 2618 | if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) |
| 2619 | continue; |
| 2620 | |
| 2621 | /* Drop the new copies of any added or changed endpoints from |
| 2622 | * the interval table. |
| 2623 | */ |
| 2624 | if (EP_IS_ADDED(ctrl_ctx, i)) { |
| 2625 | xhci_drop_ep_from_interval_table(xhci, |
| 2626 | &virt_dev->eps[i].bw_info, |
| 2627 | virt_dev->bw_table, |
| 2628 | virt_dev->udev, |
| 2629 | &virt_dev->eps[i], |
| 2630 | virt_dev->tt_info); |
| 2631 | } |
| 2632 | /* Revert the endpoint back to its old information */ |
| 2633 | memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], |
| 2634 | sizeof(ep_bw_info[i])); |
| 2635 | /* Add any changed or dropped endpoints back into the table */ |
| 2636 | if (EP_IS_DROPPED(ctrl_ctx, i)) |
| 2637 | xhci_add_ep_to_interval_table(xhci, |
| 2638 | &virt_dev->eps[i].bw_info, |
| 2639 | virt_dev->bw_table, |
| 2640 | virt_dev->udev, |
| 2641 | &virt_dev->eps[i], |
| 2642 | virt_dev->tt_info); |
| 2643 | } |
| 2644 | return -ENOMEM; |
| 2645 | } |
| 2646 | |
| 2647 | |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2648 | /* Issue a configure endpoint command or evaluate context command |
| 2649 | * and wait for it to finish. |
| 2650 | */ |
| 2651 | static int xhci_configure_endpoint(struct xhci_hcd *xhci, |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2652 | struct usb_device *udev, |
| 2653 | struct xhci_command *command, |
| 2654 | bool ctx_change, bool must_succeed) |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2655 | { |
| 2656 | int ret; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2657 | unsigned long flags; |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2658 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2659 | struct xhci_virt_device *virt_dev; |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2660 | |
| 2661 | if (!command) |
| 2662 | return -EINVAL; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2663 | |
| 2664 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2665 | virt_dev = xhci->devs[udev->slot_id]; |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2666 | |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 2667 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2668 | if (!ctrl_ctx) { |
Emil Goode | 1f21569 | 2013-06-25 15:49:36 -0700 | [diff] [blame] | 2669 | spin_unlock_irqrestore(&xhci->lock, flags); |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2670 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
| 2671 | __func__); |
| 2672 | return -ENOMEM; |
| 2673 | } |
Sarah Sharp | 750645f | 2011-09-02 11:05:43 -0700 | [diff] [blame] | 2674 | |
| 2675 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2676 | xhci_reserve_host_resources(xhci, ctrl_ctx)) { |
Sarah Sharp | 750645f | 2011-09-02 11:05:43 -0700 | [diff] [blame] | 2677 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2678 | xhci_warn(xhci, "Not enough host resources, " |
| 2679 | "active endpoint contexts = %u\n", |
| 2680 | xhci->num_active_eps); |
| 2681 | return -ENOMEM; |
| 2682 | } |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2683 | if ((xhci->quirks & XHCI_SW_BW_CHECKING) && |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2684 | xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2685 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2686 | xhci_free_host_resources(xhci, ctrl_ctx); |
Sarah Sharp | 2e27980 | 2011-09-02 11:05:50 -0700 | [diff] [blame] | 2687 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2688 | xhci_warn(xhci, "Not enough bandwidth\n"); |
| 2689 | return -ENOMEM; |
| 2690 | } |
Sarah Sharp | 750645f | 2011-09-02 11:05:43 -0700 | [diff] [blame] | 2691 | |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2692 | if (!ctx_change) |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2693 | ret = xhci_queue_configure_endpoint(xhci, command, |
| 2694 | command->in_ctx->dma, |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2695 | udev->slot_id, must_succeed); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2696 | else |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2697 | ret = xhci_queue_evaluate_context(xhci, command, |
| 2698 | command->in_ctx->dma, |
Sarah Sharp | 4b26654 | 2012-05-07 15:34:26 -0700 | [diff] [blame] | 2699 | udev->slot_id, must_succeed); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2700 | if (ret < 0) { |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2701 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2702 | xhci_free_host_resources(xhci, ctrl_ctx); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2703 | spin_unlock_irqrestore(&xhci->lock, flags); |
Xenia Ragiadakou | 3a7fa5b | 2013-07-31 07:35:27 +0300 | [diff] [blame] | 2704 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
| 2705 | "FIXME allocate a new ring segment"); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2706 | return -ENOMEM; |
| 2707 | } |
| 2708 | xhci_ring_cmd_db(xhci); |
| 2709 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2710 | |
| 2711 | /* Wait for the configure endpoint command to complete */ |
Mathias Nyman | c311e39 | 2014-05-08 19:26:03 +0300 | [diff] [blame] | 2712 | wait_for_completion(command->completion); |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2713 | |
| 2714 | if (!ctx_change) |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2715 | ret = xhci_configure_endpoint_result(xhci, udev, |
| 2716 | &command->status); |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2717 | else |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2718 | ret = xhci_evaluate_context_result(xhci, udev, |
| 2719 | &command->status); |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2720 | |
| 2721 | if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { |
| 2722 | spin_lock_irqsave(&xhci->lock, flags); |
| 2723 | /* If the command failed, remove the reserved resources. |
| 2724 | * Otherwise, clean up the estimate to include dropped eps. |
| 2725 | */ |
| 2726 | if (ret) |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2727 | xhci_free_host_resources(xhci, ctrl_ctx); |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2728 | else |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2729 | xhci_finish_resource_reservation(xhci, ctrl_ctx); |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 2730 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 2731 | } |
| 2732 | return ret; |
Sarah Sharp | f2217e8 | 2009-08-07 14:04:43 -0700 | [diff] [blame] | 2733 | } |
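/*
 * Locking pattern above: resource accounting and command queuing run
 * under xhci->lock with interrupts disabled; the lock is dropped before
 * wait_for_completion(), since the command completes from interrupt
 * context, and re-taken only to settle the XHCI_EP_LIMIT_QUIRK
 * bookkeeping.
 */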
| 2734 | |
Hans de Goede | df61383 | 2013-10-04 00:29:45 +0200 | [diff] [blame] | 2735 | static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, |
| 2736 | struct xhci_virt_device *vdev, int i) |
| 2737 | { |
| 2738 | struct xhci_virt_ep *ep = &vdev->eps[i]; |
| 2739 | |
| 2740 | if (ep->ep_state & EP_HAS_STREAMS) { |
| 2741 | xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", |
| 2742 | xhci_get_endpoint_address(i)); |
| 2743 | xhci_free_stream_info(xhci, ep->stream_info); |
| 2744 | ep->stream_info = NULL; |
| 2745 | ep->ep_state &= ~EP_HAS_STREAMS; |
| 2746 | } |
| 2747 | } |
| 2748 | |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 2749 | /* Called after one or more calls to xhci_add_endpoint() or |
| 2750 | * xhci_drop_endpoint(). If this call fails, the USB core is expected |
| 2751 | * to call xhci_reset_bandwidth(). |
| 2752 | * |
| 2753 | * Since we are in the middle of changing either configuration or |
| 2754 | * installing a new alt setting, the USB core won't allow URBs to be |
| 2755 | * enqueued for any endpoint on the old config or interface. Nothing |
| 2756 | * else should be touching the xhci->devs[slot_id] structure, so we |
| 2757 | * don't need to take the xhci->lock for manipulating that. |
| 2758 | */ |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2759 | int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) |
| 2760 | { |
| 2761 | int i; |
| 2762 | int ret = 0; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2763 | struct xhci_hcd *xhci; |
| 2764 | struct xhci_virt_device *virt_dev; |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2765 | struct xhci_input_control_ctx *ctrl_ctx; |
| 2766 | struct xhci_slot_ctx *slot_ctx; |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2767 | struct xhci_command *command; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2768 | |
Andiry Xu | 6492773 | 2010-10-14 07:22:45 -0700 | [diff] [blame] | 2769 | ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2770 | if (ret <= 0) |
| 2771 | return ret; |
| 2772 | xhci = hcd_to_xhci(hcd); |
Sarah Sharp | fe6c6c1 | 2011-05-23 16:41:17 -0700 | [diff] [blame] | 2773 | if (xhci->xhc_state & XHCI_STATE_DYING) |
| 2774 | return -ENODEV; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2775 | |
Greg Kroah-Hartman | 700e205 | 2009-04-29 19:14:08 -0700 | [diff] [blame] | 2776 | xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2777 | virt_dev = xhci->devs[udev->slot_id]; |
| 2778 | |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2779 | command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); |
| 2780 | if (!command) |
| 2781 | return -ENOMEM; |
| 2782 | |
| 2783 | command->in_ctx = virt_dev->in_ctx; |
| 2784 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2785 | /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 2786 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2787 | if (!ctrl_ctx) { |
| 2788 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
| 2789 | __func__); |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2790 | ret = -ENOMEM; |
| 2791 | goto command_cleanup; |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 2792 | } |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2793 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
| 2794 | ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); |
| 2795 | ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); |
Sarah Sharp | 2dc3753 | 2011-09-02 11:05:40 -0700 | [diff] [blame] | 2796 | |
| 2797 | 	/* Don't issue the command if there are no endpoints to update. */ |
| 2798 | if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2799 | ctrl_ctx->drop_flags == 0) { |
| 2800 | ret = 0; |
| 2801 | goto command_cleanup; |
| 2802 | } |
Julius Werner | d675913 | 2014-06-24 17:14:42 +0300 | [diff] [blame] | 2803 | /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2804 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
Julius Werner | d675913 | 2014-06-24 17:14:42 +0300 | [diff] [blame] | 2805 | for (i = 31; i >= 1; i--) { |
| 2806 | __le32 le32 = cpu_to_le32(BIT(i)); |
| 2807 | |
| 2808 | if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32)) |
| 2809 | || (ctrl_ctx->add_flags & le32) || i == 1) { |
| 2810 | slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); |
| 2811 | slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); |
| 2812 | break; |
| 2813 | } |
| 2814 | } |
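	/*
	 * Example: if endpoint context 5 is the highest one that either
	 * keeps its ring (and is not being dropped) or is being added,
	 * the loop above stops at i = 5 and Context Entries becomes
	 * LAST_CTX(5). With no endpoints left beyond the default control
	 * endpoint, i reaches 1 and the field falls back to the minimum,
	 * LAST_CTX(1).
	 */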
| 2815 | xhci_dbg(xhci, "New Input Control Context:\n"); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2816 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2817 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2818 | |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2819 | ret = xhci_configure_endpoint(xhci, udev, command, |
Sarah Sharp | 913a8a3 | 2009-09-04 10:53:13 -0700 | [diff] [blame] | 2820 | false, false); |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2821 | if (ret) |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2822 | /* Callee should call reset_bandwidth() */ |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2823 | goto command_cleanup; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2824 | |
| 2825 | xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2826 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 2827 | LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2828 | |
Sarah Sharp | 834cb0f | 2011-05-12 18:06:37 -0700 | [diff] [blame] | 2829 | /* Free any rings that were dropped, but not changed. */ |
| 2830 | for (i = 1; i < 31; ++i) { |
Matt Evans | 4819fef | 2011-06-01 13:01:07 +1000 | [diff] [blame] | 2831 | if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && |
Hans de Goede | df61383 | 2013-10-04 00:29:45 +0200 | [diff] [blame] | 2832 | !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { |
Sarah Sharp | 834cb0f | 2011-05-12 18:06:37 -0700 | [diff] [blame] | 2833 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); |
Hans de Goede | df61383 | 2013-10-04 00:29:45 +0200 | [diff] [blame] | 2834 | xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); |
| 2835 | } |
Sarah Sharp | 834cb0f | 2011-05-12 18:06:37 -0700 | [diff] [blame] | 2836 | } |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 2837 | xhci_zero_in_ctx(xhci, virt_dev); |
Sarah Sharp | 834cb0f | 2011-05-12 18:06:37 -0700 | [diff] [blame] | 2838 | /* |
| 2839 | * Install any rings for completely new endpoints or changed endpoints, |
| 2840 | * and free or cache any old rings from changed endpoints. |
| 2841 | */ |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2842 | for (i = 1; i < 31; ++i) { |
Sarah Sharp | 74f9fe2 | 2009-12-03 09:44:29 -0800 | [diff] [blame] | 2843 | if (!virt_dev->eps[i].new_ring) |
| 2844 | continue; |
| 2845 | /* Only cache or free the old ring if it exists. |
| 2846 | * It may not if this is the first add of an endpoint. |
| 2847 | */ |
| 2848 | if (virt_dev->eps[i].ring) { |
Sarah Sharp | 412566b | 2009-12-09 15:59:01 -0800 | [diff] [blame] | 2849 | xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2850 | } |
Hans de Goede | df61383 | 2013-10-04 00:29:45 +0200 | [diff] [blame] | 2851 | xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); |
Sarah Sharp | 74f9fe2 | 2009-12-03 09:44:29 -0800 | [diff] [blame] | 2852 | virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; |
| 2853 | virt_dev->eps[i].new_ring = NULL; |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2854 | } |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 2855 | command_cleanup: |
| 2856 | kfree(command->completion); |
| 2857 | kfree(command); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2858 | |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 2859 | return ret; |
| 2860 | } |
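
/*
 * Illustrative sketch, not driver code: how endpoint context indices map to
 * the input control context flags and the Context Entries field fixed up
 * above.  Context index 0 is the slot context and index 1 is EP0, so entry i
 * of the eps[] array (0..30) owns flag bit i + 1, and Context Entries holds
 * the highest valid context index.  The helper name below is hypothetical.
 */
#if 0
static u32 last_ctx_field_sketch(u32 add_flags)
{
	int i;

	/* Highest set endpoint flag wins; EP0 (BIT(1)) is the floor */
	for (i = 31; i >= 1; i--)
		if (add_flags & BIT(i))
			return LAST_CTX(i);
	return LAST_CTX(1);
}
#endif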

void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}

static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		u32 add_flags, u32 drop_flags)
{
	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}

static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	in_ctx = xhci->devs[slot_id]->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ctrl_ctx,
			added_ctxs, added_ctxs);
}

void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
			unsigned int ep_index, struct xhci_td *td)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;
	struct usb_device *udev = td->urb->dev;

	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
			"Cleaning up stalled endpoint ring");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, td, &deq_state);

	if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
		return;

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
				"Queueing new dequeue state");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Setting up input context for "
				"configure endpoint command");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
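
/*
 * Toy model (illustration only, not the driver's implementation): "moving
 * the dequeue pointer past a TD" means pointing the endpoint at the TRB
 * after the TD's last TRB so the xHC never re-executes the stalled
 * transfer.  The real xhci_find_new_dequeue_state() also follows link TRBs
 * across segments and recomputes the cycle state; this single-segment
 * sketch with a hypothetical helper name skips both.
 */
#if 0
static union xhci_trb *skip_td_sketch(union xhci_trb *last_trb_of_td,
		union xhci_trb *ring_start, unsigned int trbs_per_seg)
{
	union xhci_trb *next = last_trb_of_td + 1;

	/* Single-segment toy: wrap instead of following a link TRB */
	if (next == ring_start + trbs_per_seg)
		next = ring_start;
	return next;
}
#endif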

/* Called when clearing halted device. The core should have sent the control
 * message to clear the device halt condition. The host side of the halt should
 * already be cleared with a reset endpoint command issued when the STALL tx
 * event was received.
 *
 * Context: in_interrupt
 */

void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(hcd);

	/*
	 * We might need to implement the config ep cmd in xhci 4.8.1 note:
	 * The Reset Endpoint Command may only be issued to endpoints in the
	 * Halted state. If software wishes to reset the Data Toggle or
	 * Sequence Number of an endpoint that isn't in the Halted state, then
	 * software may issue a Configure Endpoint Command with the Drop and
	 * Add bits set for the target endpoint that is in the Stopped state.
	 */

	/* For now just print debug to follow the situation */
	xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
		 ep->desc.bEndpointAddress);
}
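
/*
 * Hedged sketch of what the note above might turn into, not the driver's
 * actual implementation: drop and re-add the endpoint in one Configure
 * Endpoint command so the xHC resets its Data Toggle/Sequence Number.
 * The function name is hypothetical, the caller would have to supply the
 * udev that owns the endpoint, and error handling is abbreviated.
 */
#if 0
static int xhci_reset_ep_data_toggle_sketch(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep)
{
	struct xhci_command *cfg_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 ep_flag = xhci_get_endpoint_flag(&ep->desc);
	int ret;

	cfg_cmd = xhci_alloc_command(xhci, true, true, GFP_KERNEL);
	if (!cfg_cmd)
		return -ENOMEM;
	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_free_command(xhci, cfg_cmd);
		return -ENOMEM;
	}
	/* Carry the current endpoint context into the input context */
	xhci_endpoint_copy(xhci, cfg_cmd->in_ctx,
			xhci->devs[udev->slot_id]->out_ctx,
			xhci_get_endpoint_index(&ep->desc));
	/* Same flag in add and drop: drop, then re-add the endpoint */
	xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx,
			xhci->devs[udev->slot_id]->out_ctx, ctrl_ctx,
			ep_flag, ep_flag);
	ret = xhci_configure_endpoint(xhci, udev, cfg_cmd, false, false);
	xhci_free_command(xhci, cfg_cmd);
	return ret;
}
#endif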

static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}

static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports.  Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers. xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}
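
/*
 * Worked example (illustration; the helper name is hypothetical): a driver
 * asking for 5 stream IDs needs 6 entries once stream 0 is counted, and
 * roundup_pow_of_two(6) == 8 stream context entries.  If HCC_MAX_PSA
 * reported only 4, both values would be clamped to 4, leaving 3 usable IDs.
 */
#if 0
static unsigned int stream_ctxs_needed_sketch(unsigned int driver_streams)
{
	/* +1 for stream 0, then round up to a power of two: 5 -> 6 -> 8 */
	return roundup_pow_of_two(driver_streams + 1);
}
#endif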

/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}
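
/*
 * Illustration of the clamp above (assumption: usb_ss_max_streams() counts
 * usable stream IDs, excluding stream 0).  With *num_streams == 17, i.e.
 * 16 requested plus stream 0, and an endpoint whose companion descriptor
 * reports 8, the result is 9: 8 usable IDs plus stream 0.  The helper name
 * is hypothetical.
 */
#if 0
static unsigned int clamp_streams_sketch(void)
{
	unsigned int num_streams = 17;	/* 16 requested + stream 0 */
	unsigned int max_streams = 8;	/* from usb_ss_max_streams() */

	if (max_streams < (num_streams - 1))
		num_streams = max_streams + 1;	/* 9 */
	return num_streams;
}
#endif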

static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}

/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
 * coordinate mass storage command queueing across multiple endpoints (basically
 * a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	/* MaxPSASize value 0 (2 streams) means streams are not supported */
	if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
			HCC_MAX_PSA(xhci->hcc_params) < 4) {
		xhci_dbg(xhci, "xHCI controller does not support streams.\n");
		return -ENOSYS;
	}

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array. FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
			 udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to the normal ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}
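
/*
 * Hedged usage sketch, not part of this file: class drivers reach
 * xhci_alloc_streams() through the USB core's usb_alloc_streams() rather
 * than calling it directly.  The driver function below is invented for
 * illustration; only the usb_alloc_streams() call itself is real API.
 */
#if 0
static int example_enable_streams(struct usb_interface *intf,
		struct usb_host_endpoint *data_in,
		struct usb_host_endpoint *data_out)
{
	struct usb_host_endpoint *eps[] = { data_in, data_out };
	int streams;

	/* Ask for 16 stream IDs on both bulk endpoints in one call */
	streams = usb_alloc_streams(intf, eps, ARRAY_SIZE(eps), 16,
			GFP_KERNEL);
	if (streams < 0)
		return streams;	/* e.g. -ENOSYS if streams unsupported */
	/* May be fewer than 16 if the xHC or the endpoints support less */
	return streams;
}
#endif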

/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -EINVAL;
	}

	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to the normal ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
	struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}

/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int last_freed_endpoint;
	struct xhci_slot_ctx *slot_ctx;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
			SLOT_STATE_DISABLED)
		return 0;

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);

	ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	wait_for_completion(reset_device_cmd->completion);

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout waiting for reset device command\n");
		ret = -ETIME;
		goto command_cleanup;
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_dbg(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
					xhci_get_endpoint_address(i));
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
			last_freed_endpoint = i;
		}
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
	}
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);

	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}
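
/*
 * Context sketch (illustration; the real hc_driver table lives elsewhere in
 * this file): the USB core reaches the reset and slot-management entry
 * points above and below through struct hc_driver hooks, roughly like this
 * excerpt.
 */
#if 0
static const struct hc_driver xhci_hc_driver_excerpt = {
	.reset_device =		xhci_discover_or_reset_device,
	.free_dev =		xhci_free_dev,
	.alloc_dev =		xhci_alloc_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
};
#endif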

/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i, ret;
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
	if (!command)
		return;

#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * We called pm_runtime_get_noresume when the device was attached.
	 * Decrement the counter here to allow controller to runtime suspend
	 * if no devices remain.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_put_noidle(hcd->self.controller);
#endif

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* If the host is halted due to driver unload, we still need to free the
	 * device.
	 */
	if (ret <= 0 && ret != -ENODEV) {
		kfree(command);
		return;
	}

	virt_dev = xhci->devs[udev->slot_id];

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = readl(&xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(command);
		return;
	}

	if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
				    udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot.  XXX Can free sleep?
	 */
}

/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding 1 ep ctx, %u now active.",
			xhci->num_active_eps);
	return 0;
}


/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret, slot_id;
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
	if (!command)
		return 0;

	/* xhci->slot_id and xhci->addr_dev are not thread-safe */
	mutex_lock(&xhci->mutex);
	spin_lock_irqsave(&xhci->lock, flags);
	command->completion = &xhci->addr_dev;
	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		mutex_unlock(&xhci->mutex);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		kfree(command);
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(command->completion);
	slot_id = xhci->slot_id;
	mutex_unlock(&xhci->mutex);

	if (!slot_id || command->status != COMP_SUCCESS) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
				HCS_MAX_SLOTS(
					readl(&xhci->cap_regs->hcs_params1)));
		kfree(command);
		return 0;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	udev->slot_id = slot_id;

#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * If resetting upon resume, we can't put the controller into runtime
	 * suspend if there is a device attached.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_get_noresume(hcd->self.controller);
#endif


	kfree(command);
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3787 | return 1; |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3788 | |
| 3789 | disable_slot: |
| 3790 | /* Disable slot, if we can do it without mem alloc */ |
| 3791 | spin_lock_irqsave(&xhci->lock, flags); |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 3792 | command->completion = NULL; |
| 3793 | command->status = 0; |
| 3794 | if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, |
| 3795 | udev->slot_id)) |
Sarah Sharp | 2cf95c1 | 2011-05-11 16:14:58 -0700 | [diff] [blame] | 3796 | xhci_ring_cmd_db(xhci); |
| 3797 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3798 | return 0; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3799 | } |
| 3800 | |
| 3801 | /* |
Dan Williams | 48fc7db | 2013-12-05 17:07:27 -0800 | [diff] [blame] | 3802 | * Issue an Address Device command and optionally send a corresponding |
| 3803 | * SetAddress request to the device. |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3804 | */ |
Dan Williams | 48fc7db | 2013-12-05 17:07:27 -0800 | [diff] [blame] | 3805 | static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, |
| 3806 | enum xhci_setup_dev setup) |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3807 | { |
Dan Williams | 6f8ffc0 | 2013-11-22 01:20:01 -0800 | [diff] [blame] | 3808 | const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address"; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3809 | unsigned long flags; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3810 | struct xhci_virt_device *virt_dev; |
| 3811 | int ret = 0; |
| 3812 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3813 | struct xhci_slot_ctx *slot_ctx; |
| 3814 | struct xhci_input_control_ctx *ctrl_ctx; |
Sarah Sharp | 8e595a5 | 2009-07-27 12:03:31 -0700 | [diff] [blame] | 3815 | u64 temp_64; |
Chris Bainbridge | a00918d | 2015-05-19 16:30:51 +0300 | [diff] [blame] | 3816 | struct xhci_command *command = NULL; |
| 3817 | |
| 3818 | mutex_lock(&xhci->mutex); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3819 | |
Roger Quadros | 448116b | 2015-09-21 17:46:15 +0300 | [diff] [blame] | 3820 | if (xhci->xhc_state) /* dying or halted */ |
| 3821 | goto out; |
| 3822 | |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3823 | if (!udev->slot_id) { |
Xenia Ragiadakou | 84a99f6 | 2013-08-06 00:22:15 +0300 | [diff] [blame] | 3824 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
| 3825 | "Bad Slot ID %d", udev->slot_id); |
Chris Bainbridge | a00918d | 2015-05-19 16:30:51 +0300 | [diff] [blame] | 3826 | ret = -EINVAL; |
| 3827 | goto out; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3828 | } |
| 3829 | |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3830 | virt_dev = xhci->devs[udev->slot_id]; |
| 3831 | |
Matt Evans | 7ed603e | 2011-03-29 13:40:56 +1100 | [diff] [blame] | 3832 | if (WARN_ON(!virt_dev)) { |
| 3833 | /* |
| 3834 | * In a plug/unplug torture test with an NEC controller, |
| 3835 | * a NULL pointer dereference was observed once because virt_dev was NULL. |
| 3836 | * Print useful debug rather than crash if it is observed again! |
| 3837 | */ |
| 3838 | xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", |
| 3839 | udev->slot_id); |
Chris Bainbridge | a00918d | 2015-05-19 16:30:51 +0300 | [diff] [blame] | 3840 | ret = -EINVAL; |
| 3841 | goto out; |
Matt Evans | 7ed603e | 2011-03-29 13:40:56 +1100 | [diff] [blame] | 3842 | } |
| 3843 | |
Mathias Nyman | f161ead | 2015-01-09 17:18:28 +0200 | [diff] [blame] | 3844 | if (setup == SETUP_CONTEXT_ONLY) { |
| 3845 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
| 3846 | if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == |
| 3847 | SLOT_STATE_DEFAULT) { |
| 3848 | xhci_dbg(xhci, "Slot already in default state\n"); |
Chris Bainbridge | a00918d | 2015-05-19 16:30:51 +0300 | [diff] [blame] | 3849 | goto out; |
Mathias Nyman | f161ead | 2015-01-09 17:18:28 +0200 | [diff] [blame] | 3850 | } |
| 3851 | } |
| 3852 | |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 3853 | command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); |
Chris Bainbridge | a00918d | 2015-05-19 16:30:51 +0300 | [diff] [blame] | 3854 | if (!command) { |
| 3855 | ret = -ENOMEM; |
| 3856 | goto out; |
| 3857 | } |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 3858 | |
| 3859 | command->in_ctx = virt_dev->in_ctx; |
| 3860 | command->completion = &xhci->addr_dev; |
| 3861 | |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3862 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 3863 | ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 3864 | if (!ctrl_ctx) { |
| 3865 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
| 3866 | __func__); |
Chris Bainbridge | a00918d | 2015-05-19 16:30:51 +0300 | [diff] [blame] | 3867 | ret = -EINVAL; |
| 3868 | goto out; |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 3869 | } |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3870 | /* |
| 3871 | * If this is the first Set Address since device plug-in or |
| 3872 | * virt_device reallocation after a resume with an xHCI power loss, |
| 3873 | * then set up the slot context. |
| 3874 | */ |
| 3875 | if (!slot_ctx->dev_info) |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3876 | xhci_setup_addressable_virt_dev(xhci, udev); |
Andiry Xu | f0615c4 | 2010-10-14 07:22:48 -0700 | [diff] [blame] | 3877 | /* Otherwise, update the control endpoint ring enqueue pointer. */ |
Sarah Sharp | 2d1ee59 | 2010-07-09 17:08:54 +0200 | [diff] [blame] | 3878 | else |
| 3879 | xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); |
Sarah Sharp | d31c285 | 2011-11-03 13:06:08 -0700 | [diff] [blame] | 3880 | ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); |
| 3881 | ctrl_ctx->drop_flags = 0; |
| 3882 | |
Sarah Sharp | 66e49d8 | 2009-07-27 12:03:46 -0700 | [diff] [blame] | 3883 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3884 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
Xenia Ragiadakou | 1d27fab | 2013-08-06 07:52:47 +0300 | [diff] [blame] | 3885 | trace_xhci_address_ctx(xhci, virt_dev->in_ctx, |
Xenia Ragiadakou | 0c052aa | 2013-11-15 03:18:07 +0200 | [diff] [blame] | 3886 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3887 | |
Sarah Sharp | f88ba78 | 2009-05-14 11:44:22 -0700 | [diff] [blame] | 3888 | spin_lock_irqsave(&xhci->lock, flags); |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 3889 | ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, |
Dan Williams | 48fc7db | 2013-12-05 17:07:27 -0800 | [diff] [blame] | 3890 | udev->slot_id, setup); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3891 | if (ret) { |
| 3892 | spin_unlock_irqrestore(&xhci->lock, flags); |
Xenia Ragiadakou | 84a99f6 | 2013-08-06 00:22:15 +0300 | [diff] [blame] | 3893 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
| 3894 | "FIXME: allocate a command ring segment"); |
Chris Bainbridge | a00918d | 2015-05-19 16:30:51 +0300 | [diff] [blame] | 3895 | goto out; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3896 | } |
Sarah Sharp | 23e3be1 | 2009-04-29 19:05:20 -0700 | [diff] [blame] | 3897 | xhci_ring_cmd_db(xhci); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3898 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 3899 | |
| 3900 | /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ |
Mathias Nyman | c311e39 | 2014-05-08 19:26:03 +0300 | [diff] [blame] | 3901 | wait_for_completion(command->completion); |
| 3902 | |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3903 | /* FIXME: From section 4.3.4: "Software shall be responsible for timing |
| 3904 | * the SetAddress() "recovery interval" required by USB and aborting the |
| 3905 | * command on a timeout." |
| 3906 | */ |
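| | /* |
| | * For context (an assumption drawn from the USB 2.0 spec, not from the |
| | * xHCI text quoted above): section 9.2.6.3 grants the device a 2 ms |
| | * SetAddress() recovery interval, so after a successful Set Address the |
| | * host must leave the device alone for 2 ms before new requests, and |
| | * any abort-on-timeout logic added here would have to respect that. |
| | */ |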
Mathias Nyman | 9ea1833 | 2014-05-08 19:26:02 +0300 | [diff] [blame] | 3907 | switch (command->status) { |
Mathias Nyman | c311e39 | 2014-05-08 19:26:03 +0300 | [diff] [blame] | 3908 | case COMP_CMD_ABORT: |
| 3909 | case COMP_CMD_STOP: |
| 3910 | xhci_warn(xhci, "Timeout while waiting for setup device command\n"); |
| 3911 | ret = -ETIME; |
| 3912 | break; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3913 | case COMP_CTX_STATE: |
| 3914 | case COMP_EBADSLT: |
Dan Williams | 6f8ffc0 | 2013-11-22 01:20:01 -0800 | [diff] [blame] | 3915 | xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", |
| 3916 | act, udev->slot_id); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3917 | ret = -EINVAL; |
| 3918 | break; |
| 3919 | case COMP_TX_ERR: |
Dan Williams | 6f8ffc0 | 2013-11-22 01:20:01 -0800 | [diff] [blame] | 3920 | dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3921 | ret = -EPROTO; |
| 3922 | break; |
Alex He | f6ba6fe | 2011-06-08 18:34:06 +0800 | [diff] [blame] | 3923 | case COMP_DEV_ERR: |
Dan Williams | 6f8ffc0 | 2013-11-22 01:20:01 -0800 | [diff] [blame] | 3924 | dev_warn(&udev->dev, |
| 3925 | "ERROR: Incompatible device for setup %s command\n", act); |
Alex He | f6ba6fe | 2011-06-08 18:34:06 +0800 | [diff] [blame] | 3926 | ret = -ENODEV; |
| 3927 | break; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3928 | case COMP_SUCCESS: |
Xenia Ragiadakou | 84a99f6 | 2013-08-06 00:22:15 +0300 | [diff] [blame] | 3929 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
Dan Williams | 6f8ffc0 | 2013-11-22 01:20:01 -0800 | [diff] [blame] | 3930 | "Successful setup %s command", act); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3931 | break; |
| 3932 | default: |
Dan Williams | 6f8ffc0 | 2013-11-22 01:20:01 -0800 | [diff] [blame] | 3933 | xhci_err(xhci, |
| 3934 | "ERROR: unexpected setup %s command completion code 0x%x.\n", |
Mathias Nyman | 9ea1833 | 2014-05-08 19:26:02 +0300 | [diff] [blame] | 3935 | act, command->status); |
Sarah Sharp | 66e49d8 | 2009-07-27 12:03:46 -0700 | [diff] [blame] | 3936 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3937 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); |
Xenia Ragiadakou | 1d27fab | 2013-08-06 07:52:47 +0300 | [diff] [blame] | 3938 | trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3939 | ret = -EINVAL; |
| 3940 | break; |
| 3941 | } |
Chris Bainbridge | a00918d | 2015-05-19 16:30:51 +0300 | [diff] [blame] | 3942 | if (ret) |
| 3943 | goto out; |
Sarah Sharp | f7b2e40 | 2014-01-30 13:27:49 -0800 | [diff] [blame] | 3944 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); |
Xenia Ragiadakou | 84a99f6 | 2013-08-06 00:22:15 +0300 | [diff] [blame] | 3945 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
| 3946 | "Op regs DCBAA ptr = %#016llx", temp_64); |
| 3947 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
| 3948 | "Slot ID %d dcbaa entry @%p = %#016llx", |
| 3949 | udev->slot_id, |
| 3950 | &xhci->dcbaa->dev_context_ptrs[udev->slot_id], |
| 3951 | (unsigned long long) |
| 3952 | le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); |
| 3953 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
| 3954 | "Output Context DMA address = %#08llx", |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3955 | (unsigned long long)virt_dev->out_ctx->dma); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3956 | xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3957 | xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); |
Xenia Ragiadakou | 1d27fab | 2013-08-06 07:52:47 +0300 | [diff] [blame] | 3958 | trace_xhci_address_ctx(xhci, virt_dev->in_ctx, |
Xenia Ragiadakou | 0c052aa | 2013-11-15 03:18:07 +0200 | [diff] [blame] | 3959 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3960 | xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3961 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3962 | /* |
| 3963 | * USB core uses address 1 for the roothubs, so we add one to the |
| 3964 | * address given back to us by the HC. |
| 3965 | */ |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3966 | slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); |
Xenia Ragiadakou | 1d27fab | 2013-08-06 07:52:47 +0300 | [diff] [blame] | 3967 | trace_xhci_address_ctx(xhci, virt_dev->out_ctx, |
Xenia Ragiadakou | 0c052aa | 2013-11-15 03:18:07 +0200 | [diff] [blame] | 3968 | le32_to_cpu(slot_ctx->dev_info) >> 27); |
Sarah Sharp | f94e0186 | 2009-04-27 19:58:38 -0700 | [diff] [blame] | 3969 | /* Zero the input context control for later use */ |
John Youn | d115b04 | 2009-07-27 12:05:15 -0700 | [diff] [blame] | 3970 | ctrl_ctx->add_flags = 0; |
| 3971 | ctrl_ctx->drop_flags = 0; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3972 | |
Xenia Ragiadakou | 84a99f6 | 2013-08-06 00:22:15 +0300 | [diff] [blame] | 3973 | xhci_dbg_trace(xhci, trace_xhci_dbg_address, |
Dan Williams | a2cdc34 | 2013-10-16 12:25:44 -0700 | [diff] [blame] | 3974 | "Internal device address = %d", |
| 3975 | le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); |
Chris Bainbridge | a00918d | 2015-05-19 16:30:51 +0300 | [diff] [blame] | 3976 | out: |
| 3977 | mutex_unlock(&xhci->mutex); |
Mathias Nyman | ddba5cd | 2014-05-08 19:26:00 +0300 | [diff] [blame] | 3978 | kfree(command); |
Chris Bainbridge | a00918d | 2015-05-19 16:30:51 +0300 | [diff] [blame] | 3979 | return ret; |
Sarah Sharp | 3ffbba9 | 2009-04-27 19:57:38 -0700 | [diff] [blame] | 3980 | } |
| 3981 | |
Dan Williams | 48fc7db | 2013-12-05 17:07:27 -0800 | [diff] [blame] | 3982 | int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) |
| 3983 | { |
| 3984 | return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS); |
| 3985 | } |
| 3986 | |
| 3987 | int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) |
| 3988 | { |
| 3989 | return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY); |
| 3990 | } |
| 3991 | |
Lan Tianyu | 3f5eb14 | 2013-03-19 16:48:12 +0800 | [diff] [blame] | 3992 | /* |
| 3993 | * Translate the port index into the real index in the HW port status |
| 3994 | * registers. Calculate the offset between the port's PORTSC register |
| 3995 | * and the port status base, then divide by the number of registers |
| 3996 | * per port to get the real index. Raw port numbers are 1-based. |
| 3997 | */ |
| 3998 | int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) |
| 3999 | { |
| 4000 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 4001 | __le32 __iomem *base_addr = &xhci->op_regs->port_status_base; |
| 4002 | __le32 __iomem *addr; |
| 4003 | int raw_port; |
| 4004 | |
Mathias Nyman | b50107b | 2015-10-01 18:40:38 +0300 | [diff] [blame] | 4005 | if (hcd->speed < HCD_USB3) |
Lan Tianyu | 3f5eb14 | 2013-03-19 16:48:12 +0800 | [diff] [blame] | 4006 | addr = xhci->usb2_ports[port1 - 1]; |
| 4007 | else |
| 4008 | addr = xhci->usb3_ports[port1 - 1]; |
| 4009 | |
| 4010 | raw_port = (addr - base_addr) / NUM_PORT_REGS + 1; |
| 4011 | return raw_port; |
| 4012 | } |
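| | /* |
| | * Worked example with an assumed layout: each port owns NUM_PORT_REGS |
| | * (4) 32-bit registers (PORTSC, PORTPMSC, PORTLI, PORTHLPMC), so if |
| | * usb2_ports[0] sits 8 registers past the port status base, raw_port |
| | * works out to 8 / 4 + 1 = 3 for bus port 1. |
| | */ |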
| 4013 | |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4014 | /* |
| 4015 | * Issue an Evaluate Context command to change the Maximum Exit Latency in the |
| 4016 | * slot context. If that succeeds, store the new MEL in the xhci_virt_device. |
| 4017 | */ |
Olof Johansson | d5c82fe | 2013-07-23 11:58:20 -0700 | [diff] [blame] | 4018 | static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4019 | struct usb_device *udev, u16 max_exit_latency) |
| 4020 | { |
| 4021 | struct xhci_virt_device *virt_dev; |
| 4022 | struct xhci_command *command; |
| 4023 | struct xhci_input_control_ctx *ctrl_ctx; |
| 4024 | struct xhci_slot_ctx *slot_ctx; |
| 4025 | unsigned long flags; |
| 4026 | int ret; |
| 4027 | |
| 4028 | spin_lock_irqsave(&xhci->lock, flags); |
Mathias Nyman | 9604469 | 2014-09-11 13:55:50 +0300 | [diff] [blame] | 4029 | |
| 4030 | virt_dev = xhci->devs[udev->slot_id]; |
| 4031 | |
| 4032 | /* |
| 4033 | * virt_dev might not exist yet if the xHC resumed from hibernate (S4) and |
| 4034 | * xHC was re-initialized. Exit latency will be set later after |
| 4035 | * hub_port_finish_reset() is done and xhci->devs[] are re-allocated |
| 4036 | */ |
| 4037 | |
| 4038 | if (!virt_dev || max_exit_latency == virt_dev->current_mel) { |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4039 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4040 | return 0; |
| 4041 | } |
| 4042 | |
| 4043 | /* Attempt to issue an Evaluate Context command to change the MEL. */ |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4044 | command = xhci->lpm_command; |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 4045 | ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 4046 | if (!ctrl_ctx) { |
| 4047 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4048 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
| 4049 | __func__); |
| 4050 | return -ENOMEM; |
| 4051 | } |
| 4052 | |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4053 | xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); |
| 4054 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4055 | |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4056 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
| 4057 | slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); |
| 4058 | slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); |
| 4059 | slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); |
Mathias Nyman | 4801d4ea | 2014-11-27 18:19:15 +0200 | [diff] [blame] | 4060 | slot_ctx->dev_state = 0; |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4061 | |
Xenia Ragiadakou | 3a7fa5b | 2013-07-31 07:35:27 +0300 | [diff] [blame] | 4062 | xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, |
| 4063 | "Set up evaluate context for LPM MEL change."); |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4064 | xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id); |
| 4065 | xhci_dbg_ctx(xhci, command->in_ctx, 0); |
| 4066 | |
| 4067 | /* Issue and wait for the evaluate context command. */ |
| 4068 | ret = xhci_configure_endpoint(xhci, udev, command, |
| 4069 | true, true); |
| 4070 | xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id); |
| 4071 | xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0); |
| 4072 | |
| 4073 | if (!ret) { |
| 4074 | spin_lock_irqsave(&xhci->lock, flags); |
| 4075 | virt_dev->current_mel = max_exit_latency; |
| 4076 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4077 | } |
| 4078 | return ret; |
| 4079 | } |
| 4080 | |
Rafael J. Wysocki | ceb6c9c | 2014-11-29 23:47:05 +0100 | [diff] [blame] | 4081 | #ifdef CONFIG_PM |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 4082 | |
| 4083 | /* BESL to HIRD Encoding array for USB2 LPM */ |
| 4084 | static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, |
| 4085 | 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; |
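| | /* |
| | * Illustrative note on the table above: BESL value i encodes |
| | * xhci_besl_encoding[i] microseconds of exit latency, e.g. BESL 3 is |
| | * 300 us and BESL 15 is 10000 us. The calculation below picks the |
| | * smallest index whose encoding covers the host's U2 exit latency. |
| | */ |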
| 4086 | |
| 4087 | /* Calculate HIRD/BESL for USB2 PORTPMSC */ |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 4088 | static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, |
| 4089 | struct usb_device *udev) |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 4090 | { |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 4091 | int u2del, besl, besl_host; |
| 4092 | int besl_device = 0; |
| 4093 | u32 field; |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 4094 | |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 4095 | u2del = HCS_U2_LATENCY(xhci->hcs_params3); |
| 4096 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
| 4097 | |
| 4098 | if (field & USB_BESL_SUPPORT) { |
| 4099 | for (besl_host = 0; besl_host < 16; besl_host++) { |
| 4100 | if (xhci_besl_encoding[besl_host] >= u2del) |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 4101 | break; |
| 4102 | } |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 4103 | /* Use baseline BESL value as default */ |
| 4104 | if (field & USB_BESL_BASELINE_VALID) |
| 4105 | besl_device = USB_GET_BESL_BASELINE(field); |
| 4106 | else if (field & USB_BESL_DEEP_VALID) |
| 4107 | besl_device = USB_GET_BESL_DEEP(field); |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 4108 | } else { |
| 4109 | if (u2del <= 50) |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 4110 | besl_host = 0; |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 4111 | else |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 4112 | besl_host = (u2del - 51) / 75 + 1; |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 4113 | } |
| 4114 | |
Andiry Xu | f99298b | 2011-12-12 16:45:28 +0800 | [diff] [blame] | 4115 | besl = besl_host + besl_device; |
| 4116 | if (besl > 15) |
| 4117 | besl = 15; |
| 4118 | |
| 4119 | return besl; |
Andiry Xu | 9574323 | 2011-09-23 14:19:51 -0700 | [diff] [blame] | 4120 | } |
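| | /* |
| | * Worked example for the non-BESL branch above, with an assumed u2del |
| | * of 400 us: besl_host = (400 - 51) / 75 + 1 = 5, and |
| | * xhci_besl_encoding[5] = 500 us covers the 400 us exit latency. The |
| | * host + device sum is then clamped to 15, the largest encodable value. |
| | */ |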
| 4121 | |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4122 | /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ |
| 4123 | static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) |
| 4124 | { |
| 4125 | u32 field; |
| 4126 | int l1; |
| 4127 | int besld = 0; |
| 4128 | int hirdm = 0; |
| 4129 | |
| 4130 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
| 4131 | |
| 4132 | /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ |
Mathias Nyman | 17f3486 | 2013-05-23 17:14:31 +0300 | [diff] [blame] | 4133 | l1 = udev->l1_params.timeout / 256; |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4134 | |
| 4135 | /* device has preferred BESLD */ |
| 4136 | if (field & USB_BESL_DEEP_VALID) { |
| 4137 | besld = USB_GET_BESL_DEEP(field); |
| 4138 | hirdm = 1; |
| 4139 | } |
| 4140 | |
| 4141 | return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); |
| 4142 | } |
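| | /* |
| | * Worked example with assumed inputs: an L1 timeout of 512 us gives |
| | * l1 = 512 / 256 = 2, and a device advertising a deep BESL of 4 gives |
| | * besld = 4, hirdm = 1, so the function returns |
| | * PORT_BESLD(4) | PORT_L1_TIMEOUT(2) | PORT_HIRDM(1) for PORTHLPMC. |
| | */ |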
| 4143 | |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 4144 | int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
| 4145 | struct usb_device *udev, int enable) |
| 4146 | { |
| 4147 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 4148 | __le32 __iomem **port_array; |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4149 | __le32 __iomem *pm_addr, *hlpm_addr; |
| 4150 | u32 pm_val, hlpm_val, field; |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 4151 | unsigned int port_num; |
| 4152 | unsigned long flags; |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4153 | int hird, exit_latency; |
| 4154 | int ret; |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 4155 | |
Mathias Nyman | b50107b | 2015-10-01 18:40:38 +0300 | [diff] [blame] | 4156 | if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 4157 | !udev->lpm_capable) |
| 4158 | return -EPERM; |
| 4159 | |
| 4160 | if (!udev->parent || udev->parent->parent || |
| 4161 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) |
| 4162 | return -EPERM; |
| 4163 | |
| 4164 | if (udev->usb2_hw_lpm_capable != 1) |
| 4165 | return -EPERM; |
| 4166 | |
| 4167 | spin_lock_irqsave(&xhci->lock, flags); |
| 4168 | |
| 4169 | port_array = xhci->usb2_ports; |
| 4170 | port_num = udev->portnum - 1; |
Mathias Nyman | b6e7637 | 2013-05-23 17:14:29 +0300 | [diff] [blame] | 4171 | pm_addr = port_array[port_num] + PORTPMSC; |
Xenia Ragiadakou | b0ba972 | 2013-11-15 05:34:06 +0200 | [diff] [blame] | 4172 | pm_val = readl(pm_addr); |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4173 | hlpm_addr = port_array[port_num] + PORTHLPMC; |
| 4174 | field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 4175 | |
| 4176 | xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", |
Lin Wang | 654a55d | 2014-05-08 19:25:54 +0300 | [diff] [blame] | 4177 | enable ? "enable" : "disable", port_num + 1); |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 4178 | |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 4179 | if (enable) { |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4180 | /* Host supports BESL timeout instead of HIRD */ |
| 4181 | if (udev->usb2_hw_lpm_besl_capable) { |
| 4182 | /* If the device doesn't have a preferred BESL value, use a |
| 4183 | * default one that works on mixed HIRD and BESL |
| 4184 | * systems. See the XHCI_DEFAULT_BESL definition in xhci.h. |
| 4185 | */ |
| 4186 | if ((field & USB_BESL_SUPPORT) && |
| 4187 | (field & USB_BESL_BASELINE_VALID)) |
| 4188 | hird = USB_GET_BESL_BASELINE(field); |
| 4189 | else |
Mathias Nyman | 17f3486 | 2013-05-23 17:14:31 +0300 | [diff] [blame] | 4190 | hird = udev->l1_params.besl; |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4191 | |
| 4192 | exit_latency = xhci_besl_encoding[hird]; |
| 4193 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4194 | |
| 4195 | /* The USB 3.0 code dedicates the one xhci->lpm_command->in_ctx |
| 4196 | * input context to link power management evaluate |
| 4197 | * context commands. It is protected by the hcd->bandwidth |
| 4198 | * mutex and is shared by all devices. We need to set |
| 4199 | * the max exit latency for USB 2 BESL LPM as well, so |
| 4200 | * use the same mutex and xhci_change_max_exit_latency(). |
| 4201 | */ |
| 4202 | mutex_lock(hcd->bandwidth_mutex); |
| 4203 | ret = xhci_change_max_exit_latency(xhci, udev, |
| 4204 | exit_latency); |
| 4205 | mutex_unlock(hcd->bandwidth_mutex); |
| 4206 | |
| 4207 | if (ret < 0) |
| 4208 | return ret; |
| 4209 | spin_lock_irqsave(&xhci->lock, flags); |
| 4210 | |
| 4211 | hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev); |
Xenia Ragiadakou | 204b779 | 2013-11-15 05:34:07 +0200 | [diff] [blame] | 4212 | writel(hlpm_val, hlpm_addr); |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4213 | /* flush write */ |
Xenia Ragiadakou | b0ba972 | 2013-11-15 05:34:06 +0200 | [diff] [blame] | 4214 | readl(hlpm_addr); |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4215 | } else { |
| 4216 | hird = xhci_calculate_hird_besl(xhci, udev); |
| 4217 | } |
| 4218 | |
| 4219 | pm_val &= ~PORT_HIRD_MASK; |
Sarah Sharp | 58e21f7 | 2013-10-07 17:17:20 -0700 | [diff] [blame] | 4220 | pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); |
Xenia Ragiadakou | 204b779 | 2013-11-15 05:34:07 +0200 | [diff] [blame] | 4221 | writel(pm_val, pm_addr); |
Xenia Ragiadakou | b0ba972 | 2013-11-15 05:34:06 +0200 | [diff] [blame] | 4222 | pm_val = readl(pm_addr); |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4223 | pm_val |= PORT_HLE; |
Xenia Ragiadakou | 204b779 | 2013-11-15 05:34:07 +0200 | [diff] [blame] | 4224 | writel(pm_val, pm_addr); |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4225 | /* flush write */ |
Xenia Ragiadakou | b0ba972 | 2013-11-15 05:34:06 +0200 | [diff] [blame] | 4226 | readl(pm_addr); |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 4227 | } else { |
Sarah Sharp | 58e21f7 | 2013-10-07 17:17:20 -0700 | [diff] [blame] | 4228 | pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); |
Xenia Ragiadakou | 204b779 | 2013-11-15 05:34:07 +0200 | [diff] [blame] | 4229 | writel(pm_val, pm_addr); |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4230 | /* flush write */ |
Xenia Ragiadakou | b0ba972 | 2013-11-15 05:34:06 +0200 | [diff] [blame] | 4231 | readl(pm_addr); |
Mathias Nyman | a558ccd | 2013-05-23 17:14:30 +0300 | [diff] [blame] | 4232 | if (udev->usb2_hw_lpm_besl_capable) { |
| 4233 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4234 | mutex_lock(hcd->bandwidth_mutex); |
| 4235 | xhci_change_max_exit_latency(xhci, udev, 0); |
| 4236 | mutex_unlock(hcd->bandwidth_mutex); |
| 4237 | return 0; |
| 4238 | } |
Andiry Xu | 65580b43 | 2011-09-23 14:19:52 -0700 | [diff] [blame] | 4239 | } |
| 4240 | |
| 4241 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4242 | return 0; |
| 4243 | } |
| 4244 | |
Mathias Nyman | b630d4b | 2013-05-23 17:14:28 +0300 | [diff] [blame] | 4245 | /* Check if a USB2 port supports a given extended capability protocol. |
| 4246 | * Only the USB2 ports' extended protocol capability values are cached. |
| 4247 | * Return 1 if the capability is supported. |
| 4248 | */ |
| 4249 | static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, |
| 4250 | unsigned capability) |
| 4251 | { |
| 4252 | u32 port_offset, port_count; |
| 4253 | int i; |
| 4254 | |
| 4255 | for (i = 0; i < xhci->num_ext_caps; i++) { |
| 4256 | if (xhci->ext_caps[i] & capability) { |
| 4257 | /* port offsets starts at 1 */ |
| 4258 | port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; |
| 4259 | port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); |
| 4260 | if (port >= port_offset && |
| 4261 | port < port_offset + port_count) |
| 4262 | return 1; |
| 4263 | } |
| 4264 | } |
| 4265 | return 0; |
| 4266 | } |
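| | /* |
| | * Worked example with assumed cached values: a capability whose |
| | * XHCI_EXT_PORT_OFF() is 1 and XHCI_EXT_PORT_COUNT() is 4 covers |
| | * zero-based ports 0..3, so port 2 returns 1 and port 4 returns 0. |
| | */ |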
| 4267 | |
Sarah Sharp | b01bcbf | 2012-05-21 07:54:42 -0700 | [diff] [blame] | 4268 | int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
| 4269 | { |
| 4270 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
Mathias Nyman | b630d4b | 2013-05-23 17:14:28 +0300 | [diff] [blame] | 4271 | int portnum = udev->portnum - 1; |
Sarah Sharp | b01bcbf | 2012-05-21 07:54:42 -0700 | [diff] [blame] | 4272 | |
Mathias Nyman | b50107b | 2015-10-01 18:40:38 +0300 | [diff] [blame] | 4273 | if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support || |
Sarah Sharp | de68bab | 2013-09-30 17:26:28 +0300 | [diff] [blame] | 4274 | !udev->lpm_capable) |
| 4275 | return 0; |
| 4276 | |
| 4277 | /* So far we only support LPM for non-hub devices connected to the root hub */ |
| 4278 | if (!udev->parent || udev->parent->parent || |
| 4279 | udev->descriptor.bDeviceClass == USB_CLASS_HUB) |
| 4280 | return 0; |
| 4281 | |
| 4282 | if (xhci->hw_lpm_support == 1 && |
| 4283 | xhci_check_usb2_port_capability( |
| 4284 | xhci, portnum, XHCI_HLC)) { |
| 4285 | udev->usb2_hw_lpm_capable = 1; |
| 4286 | udev->l1_params.timeout = XHCI_L1_TIMEOUT; |
| 4287 | udev->l1_params.besl = XHCI_DEFAULT_BESL; |
| 4288 | if (xhci_check_usb2_port_capability(xhci, portnum, |
| 4289 | XHCI_BLC)) |
| 4290 | udev->usb2_hw_lpm_besl_capable = 1; |
Sarah Sharp | b01bcbf | 2012-05-21 07:54:42 -0700 | [diff] [blame] | 4291 | } |
| 4292 | |
| 4293 | return 0; |
| 4294 | } |
| 4295 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4296 | /*---------------------- USB 3.0 Link PM functions ------------------------*/ |
| 4297 | |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4298 | /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */ |
| 4299 | static unsigned long long xhci_service_interval_to_ns( |
| 4300 | struct usb_endpoint_descriptor *desc) |
| 4301 | { |
Oliver Neukum | 16b45fd | 2012-10-17 10:16:16 +0200 | [diff] [blame] | 4302 | return (1ULL << (desc->bInterval - 1)) * 125 * 1000; |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4303 | } |
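| | /* |
| | * Example: bInterval = 4 gives 2^(4 - 1) * 125 us = 1 ms, so this |
| | * returns 1000000 ns. |
| | */ |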
| 4304 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4305 | static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, |
| 4306 | enum usb3_link_state state) |
| 4307 | { |
| 4308 | unsigned long long sel; |
| 4309 | unsigned long long pel; |
| 4310 | unsigned int max_sel_pel; |
| 4311 | char *state_name; |
| 4312 | |
| 4313 | switch (state) { |
| 4314 | case USB3_LPM_U1: |
| 4315 | /* Convert SEL and PEL stored in nanoseconds to microseconds */ |
| 4316 | sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); |
| 4317 | pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); |
| 4318 | max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; |
| 4319 | state_name = "U1"; |
| 4320 | break; |
| 4321 | case USB3_LPM_U2: |
| 4322 | sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); |
| 4323 | pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); |
| 4324 | max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; |
| 4325 | state_name = "U2"; |
| 4326 | break; |
| 4327 | default: |
| 4328 | dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", |
| 4329 | __func__); |
Sarah Sharp | e25e62a | 2012-06-07 11:10:32 -0700 | [diff] [blame] | 4330 | return USB3_LPM_DISABLED; |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4331 | } |
| 4332 | |
| 4333 | if (sel <= max_sel_pel && pel <= max_sel_pel) |
| 4334 | return USB3_LPM_DEVICE_INITIATED; |
| 4335 | |
| 4336 | if (sel > max_sel_pel) |
| 4337 | dev_dbg(&udev->dev, "Device-initiated %s disabled " |
| 4338 | "due to long SEL %llu ms\n", |
| 4339 | state_name, sel); |
| 4340 | else |
| 4341 | dev_dbg(&udev->dev, "Device-initiated %s disabled " |
Joe Perches | 03e64e9 | 2013-07-16 19:25:59 -0700 | [diff] [blame] | 4342 | "due to long PEL %llu us\n", |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4343 | state_name, pel); |
| 4344 | return USB3_LPM_DISABLED; |
| 4345 | } |
| 4346 | |
Pratyush Anand | 9502c46 | 2014-07-04 17:01:23 +0300 | [diff] [blame] | 4347 | /* The U1 timeout should be the maximum of the following values: |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4348 | * - For control endpoints, U1 system exit latency (SEL) * 3 |
| 4349 | * - For bulk endpoints, U1 SEL * 5 |
| 4350 | * - For interrupt endpoints: |
| 4351 | * - Notification EPs, U1 SEL * 3 |
| 4352 | * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) |
| 4353 | * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) |
| 4354 | */ |
Pratyush Anand | 9502c46 | 2014-07-04 17:01:23 +0300 | [diff] [blame] | 4355 | static unsigned long long xhci_calculate_intel_u1_timeout( |
| 4356 | struct usb_device *udev, |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4357 | struct usb_endpoint_descriptor *desc) |
| 4358 | { |
| 4359 | unsigned long long timeout_ns; |
| 4360 | int ep_type; |
| 4361 | int intr_type; |
| 4362 | |
| 4363 | ep_type = usb_endpoint_type(desc); |
| 4364 | switch (ep_type) { |
| 4365 | case USB_ENDPOINT_XFER_CONTROL: |
| 4366 | timeout_ns = udev->u1_params.sel * 3; |
| 4367 | break; |
| 4368 | case USB_ENDPOINT_XFER_BULK: |
| 4369 | timeout_ns = udev->u1_params.sel * 5; |
| 4370 | break; |
| 4371 | case USB_ENDPOINT_XFER_INT: |
| 4372 | intr_type = usb_endpoint_interrupt_type(desc); |
| 4373 | if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { |
| 4374 | timeout_ns = udev->u1_params.sel * 3; |
| 4375 | break; |
| 4376 | } |
| 4377 | /* Otherwise the calculation is the same as isoc eps */ |
| 4378 | case USB_ENDPOINT_XFER_ISOC: |
| 4379 | timeout_ns = xhci_service_interval_to_ns(desc); |
Sarah Sharp | c88db16 | 2012-05-21 08:44:33 -0700 | [diff] [blame] | 4380 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4381 | if (timeout_ns < udev->u1_params.sel * 2) |
| 4382 | timeout_ns = udev->u1_params.sel * 2; |
| 4383 | break; |
| 4384 | default: |
| 4385 | return 0; |
| 4386 | } |
| 4387 | |
Pratyush Anand | 9502c46 | 2014-07-04 17:01:23 +0300 | [diff] [blame] | 4388 | return timeout_ns; |
| 4389 | } |
| 4390 | |
| 4391 | /* Returns the hub-encoded U1 timeout value. */ |
| 4392 | static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci, |
| 4393 | struct usb_device *udev, |
| 4394 | struct usb_endpoint_descriptor *desc) |
| 4395 | { |
| 4396 | unsigned long long timeout_ns; |
| 4397 | |
| 4398 | if (xhci->quirks & XHCI_INTEL_HOST) |
| 4399 | timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc); |
| 4400 | else |
| 4401 | timeout_ns = udev->u1_params.sel; |
| 4402 | |
| 4403 | /* The U1 timeout is encoded in 1us intervals. |
| 4404 | * Don't return a timeout of zero, because that's USB3_LPM_DISABLED. |
| 4405 | */ |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4406 | if (timeout_ns == USB3_LPM_DISABLED) |
Pratyush Anand | 9502c46 | 2014-07-04 17:01:23 +0300 | [diff] [blame] | 4407 | timeout_ns = 1; |
| 4408 | else |
| 4409 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4410 | |
| 4411 | /* If the necessary timeout value is bigger than what we can set in the |
| 4412 | * USB 3.0 hub, we have to disable hub-initiated U1. |
| 4413 | */ |
| 4414 | if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) |
| 4415 | return timeout_ns; |
| 4416 | dev_dbg(&udev->dev, "Hub-initiated U1 disabled " |
| 4417 | "due to long timeout %llu ms\n", timeout_ns); |
| 4418 | return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); |
| 4419 | } |
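| | /* |
| | * Worked example with assumed latencies: a bulk endpoint with |
| | * u1_params.sel = 400 ns on an Intel host needs 5 * 400 = 2000 ns, |
| | * which encodes to DIV_ROUND_UP(2000, 1000) = 2, i.e. 2 us, well |
| | * under the USB3_LPM_U1_MAX_TIMEOUT limit of 127 us. |
| | */ |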
| 4420 | |
Pratyush Anand | 9502c46 | 2014-07-04 17:01:23 +0300 | [diff] [blame] | 4421 | /* The U2 timeout should be the maximum of: |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4422 | * - 10 ms (to avoid the bandwidth impact on the scheduler) |
| 4423 | * - largest bInterval of any active periodic endpoint (to avoid going |
| 4424 | * into lower power link states between intervals). |
| 4425 | * - the U2 Exit Latency of the device |
| 4426 | */ |
Pratyush Anand | 9502c46 | 2014-07-04 17:01:23 +0300 | [diff] [blame] | 4427 | static unsigned long long xhci_calculate_intel_u2_timeout( |
| 4428 | struct usb_device *udev, |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4429 | struct usb_endpoint_descriptor *desc) |
| 4430 | { |
| 4431 | unsigned long long timeout_ns; |
| 4432 | unsigned long long u2_del_ns; |
| 4433 | |
| 4434 | timeout_ns = 10 * 1000 * 1000; |
| 4435 | |
| 4436 | if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) && |
| 4437 | (xhci_service_interval_to_ns(desc) > timeout_ns)) |
| 4438 | timeout_ns = xhci_service_interval_to_ns(desc); |
| 4439 | |
Oliver Neukum | 966e7a8 | 2012-10-17 12:17:50 +0200 | [diff] [blame] | 4440 | u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4441 | if (u2_del_ns > timeout_ns) |
| 4442 | timeout_ns = u2_del_ns; |
| 4443 | |
Pratyush Anand | 9502c46 | 2014-07-04 17:01:23 +0300 | [diff] [blame] | 4444 | return timeout_ns; |
| 4445 | } |
| 4446 | |
| 4447 | /* Returns the hub-encoded U2 timeout value. */ |
| 4448 | static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, |
| 4449 | struct usb_device *udev, |
| 4450 | struct usb_endpoint_descriptor *desc) |
| 4451 | { |
| 4452 | unsigned long long timeout_ns; |
| 4453 | |
| 4454 | if (xhci->quirks & XHCI_INTEL_HOST) |
| 4455 | timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); |
| 4456 | else |
| 4457 | timeout_ns = udev->u2_params.sel; |
| 4458 | |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4459 | /* The U2 timeout is encoded in 256us intervals */ |
Sarah Sharp | c88db16 | 2012-05-21 08:44:33 -0700 | [diff] [blame] | 4460 | timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4461 | /* If the necessary timeout value is bigger than what we can set in the |
| 4462 | * USB 3.0 hub, we have to disable hub-initiated U2. |
| 4463 | */ |
| 4464 | if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) |
| 4465 | return timeout_ns; |
| 4466 | dev_dbg(&udev->dev, "Hub-initiated U2 disabled " |
| 4467 | "due to long timeout %llu ms\n", timeout_ns); |
| 4468 | return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); |
| 4469 | } |
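| | /* |
| | * Worked example for the Intel path: if the 10 ms floor dominates, |
| | * timeout_ns = 10000000 encodes to DIV_ROUND_UP_ULL(10000000, 256000) |
| | * = 40, i.e. 40 units of 256 us (about 10.24 ms). |
| | */ |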
| 4470 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4471 | static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, |
| 4472 | struct usb_device *udev, |
| 4473 | struct usb_endpoint_descriptor *desc, |
| 4474 | enum usb3_link_state state, |
| 4475 | u16 *timeout) |
| 4476 | { |
Pratyush Anand | 9502c46 | 2014-07-04 17:01:23 +0300 | [diff] [blame] | 4477 | if (state == USB3_LPM_U1) |
| 4478 | return xhci_calculate_u1_timeout(xhci, udev, desc); |
| 4479 | else if (state == USB3_LPM_U2) |
| 4480 | return xhci_calculate_u2_timeout(xhci, udev, desc); |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4481 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4482 | return USB3_LPM_DISABLED; |
| 4483 | } |
| 4484 | |
| 4485 | static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, |
| 4486 | struct usb_device *udev, |
| 4487 | struct usb_endpoint_descriptor *desc, |
| 4488 | enum usb3_link_state state, |
| 4489 | u16 *timeout) |
| 4490 | { |
| 4491 | u16 alt_timeout; |
| 4492 | |
| 4493 | alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, |
| 4494 | desc, state, timeout); |
| 4495 | |
| 4496 | /* If we found we can't enable hub-initiated LPM, or |
| 4497 | * the U1 or U2 exit latency was too high to allow |
| 4498 | * device-initiated LPM as well, just stop searching. |
| 4499 | */ |
| 4500 | if (alt_timeout == USB3_LPM_DISABLED || |
| 4501 | alt_timeout == USB3_LPM_DEVICE_INITIATED) { |
| 4502 | *timeout = alt_timeout; |
| 4503 | return -E2BIG; |
| 4504 | } |
| 4505 | if (alt_timeout > *timeout) |
| 4506 | *timeout = alt_timeout; |
| 4507 | return 0; |
| 4508 | } |
| 4509 | |
| 4510 | static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, |
| 4511 | struct usb_device *udev, |
| 4512 | struct usb_host_interface *alt, |
| 4513 | enum usb3_link_state state, |
| 4514 | u16 *timeout) |
| 4515 | { |
| 4516 | int j; |
| 4517 | |
| 4518 | for (j = 0; j < alt->desc.bNumEndpoints; j++) { |
| 4519 | if (xhci_update_timeout_for_endpoint(xhci, udev, |
| 4520 | &alt->endpoint[j].desc, state, timeout)) |
| 4521 | return -E2BIG; |
| 4523 | } |
| 4524 | return 0; |
| 4525 | } |
| 4526 | |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4527 | static int xhci_check_intel_tier_policy(struct usb_device *udev, |
| 4528 | enum usb3_link_state state) |
| 4529 | { |
| 4530 | struct usb_device *parent; |
| 4531 | unsigned int num_hubs; |
| 4532 | |
| 4533 | if (state == USB3_LPM_U2) |
| 4534 | return 0; |
| 4535 | |
| 4536 | /* Don't enable U1 if the device is on a 2nd tier hub or lower. */ |
| 4537 | for (parent = udev->parent, num_hubs = 0; parent->parent; |
| 4538 | parent = parent->parent) |
| 4539 | num_hubs++; |
| 4540 | |
| 4541 | if (num_hubs < 2) |
| 4542 | return 0; |
| 4543 | |
| 4544 | dev_dbg(&udev->dev, "Disabling U1 link state for device" |
| 4545 | " below second-tier hub.\n"); |
| 4546 | dev_dbg(&udev->dev, "Plug device into first-tier hub " |
| 4547 | "to decrease power consumption.\n"); |
| 4548 | return -E2BIG; |
| 4549 | } |
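| | /* |
| | * Example of the hub counting above: a device on the root hub never |
| | * enters the loop (num_hubs = 0) and one external hub yields |
| | * num_hubs = 1, so both may use U1; two chained hubs yield |
| | * num_hubs = 2 and U1 is refused with -E2BIG. |
| | */ |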
| 4550 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4551 | static int xhci_check_tier_policy(struct xhci_hcd *xhci, |
| 4552 | struct usb_device *udev, |
| 4553 | enum usb3_link_state state) |
| 4554 | { |
Sarah Sharp | e3567d2 | 2012-05-16 13:36:24 -0700 | [diff] [blame] | 4555 | if (xhci->quirks & XHCI_INTEL_HOST) |
| 4556 | return xhci_check_intel_tier_policy(udev, state); |
Pratyush Anand | 9502c46 | 2014-07-04 17:01:23 +0300 | [diff] [blame] | 4557 | else |
| 4558 | return 0; |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4559 | } |
| 4560 | |
| 4561 | /* Returns the U1 or U2 timeout that should be enabled. |
| 4562 | * If the tier check or timeout setting functions return with a non-zero exit |
| 4563 | * code, that means the timeout value has been finalized and we shouldn't look |
| 4564 | * at any more endpoints. |
| 4565 | */ |
| 4566 | static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, |
| 4567 | struct usb_device *udev, enum usb3_link_state state) |
| 4568 | { |
| 4569 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 4570 | struct usb_host_config *config; |
| 4571 | char *state_name; |
| 4572 | int i; |
| 4573 | u16 timeout = USB3_LPM_DISABLED; |
| 4574 | |
| 4575 | if (state == USB3_LPM_U1) |
| 4576 | state_name = "U1"; |
| 4577 | else if (state == USB3_LPM_U2) |
| 4578 | state_name = "U2"; |
| 4579 | else { |
| 4580 | dev_warn(&udev->dev, "Can't enable unknown link state %i\n", |
| 4581 | state); |
| 4582 | return timeout; |
| 4583 | } |
| 4584 | |
| 4585 | if (xhci_check_tier_policy(xhci, udev, state) < 0) |
| 4586 | return timeout; |
| 4587 | |
| 4588 | /* Gather some information about the currently installed configuration |
| 4589 | * and alternate interface settings. |
| 4590 | */ |
| 4591 | if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, |
| 4592 | state, &timeout)) |
| 4593 | return timeout; |
| 4594 | |
| 4595 | config = udev->actconfig; |
| 4596 | if (!config) |
| 4597 | return timeout; |
| 4598 | |
Xenia Ragiadakou | 64ba419 | 2013-08-26 23:29:46 +0300 | [diff] [blame] | 4599 | for (i = 0; i < config->desc.bNumInterfaces; i++) { |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4600 | struct usb_driver *driver; |
| 4601 | struct usb_interface *intf = config->interface[i]; |
| 4602 | |
| 4603 | if (!intf) |
| 4604 | continue; |
| 4605 | |
| 4606 | /* Check if any currently bound drivers want hub-initiated LPM |
| 4607 | * disabled. |
| 4608 | */ |
| 4609 | if (intf->dev.driver) { |
| 4610 | driver = to_usb_driver(intf->dev.driver); |
| 4611 | if (driver && driver->disable_hub_initiated_lpm) { |
| 4612 | dev_dbg(&udev->dev, "Hub-initiated %s disabled " |
| 4613 | "at request of driver %s\n", |
| 4614 | state_name, driver->name); |
| 4615 | return xhci_get_timeout_no_hub_lpm(udev, state); |
| 4616 | } |
| 4617 | } |
| 4618 | |
| 4619 | /* Not sure how this could happen... */ |
| 4620 | if (!intf->cur_altsetting) |
| 4621 | continue; |
| 4622 | |
| 4623 | if (xhci_update_timeout_for_interface(xhci, udev, |
| 4624 | intf->cur_altsetting, |
| 4625 | state, &timeout)) |
| 4626 | return timeout; |
| 4627 | } |
| 4628 | return timeout; |
| 4629 | } |
| 4630 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4631 | static int calculate_max_exit_latency(struct usb_device *udev, |
| 4632 | enum usb3_link_state state_changed, |
| 4633 | u16 hub_encoded_timeout) |
| 4634 | { |
| 4635 | unsigned long long u1_mel_us = 0; |
| 4636 | unsigned long long u2_mel_us = 0; |
| 4637 | unsigned long long mel_us = 0; |
| 4638 | bool disabling_u1; |
| 4639 | bool disabling_u2; |
| 4640 | bool enabling_u1; |
| 4641 | bool enabling_u2; |
| 4642 | |
| 4643 | disabling_u1 = (state_changed == USB3_LPM_U1 && |
| 4644 | hub_encoded_timeout == USB3_LPM_DISABLED); |
| 4645 | disabling_u2 = (state_changed == USB3_LPM_U2 && |
| 4646 | hub_encoded_timeout == USB3_LPM_DISABLED); |
| 4647 | |
| 4648 | enabling_u1 = (state_changed == USB3_LPM_U1 && |
| 4649 | hub_encoded_timeout != USB3_LPM_DISABLED); |
| 4650 | enabling_u2 = (state_changed == USB3_LPM_U2 && |
| 4651 | hub_encoded_timeout != USB3_LPM_DISABLED); |
| 4652 | |
| 4653 | /* If U1 was already enabled and we're not disabling it, |
| 4654 | * or we're going to enable U1, account for the U1 max exit latency. |
| 4655 | */ |
| 4656 | if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || |
| 4657 | enabling_u1) |
| 4658 | u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); |
| 4659 | if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || |
| 4660 | enabling_u2) |
| 4661 | u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); |
| 4662 | |
| 4663 | if (u1_mel_us > u2_mel_us) |
| 4664 | mel_us = u1_mel_us; |
| 4665 | else |
| 4666 | mel_us = u2_mel_us; |
| 4667 | /* xHCI host controller max exit latency field is only 16 bits wide. */ |
| 4668 | if (mel_us > MAX_EXIT) { |
| 4669 | dev_warn(&udev->dev, "Link PM max exit latency of %lluus " |
| 4670 | "is too big.\n", mel_us); |
| 4671 | return -E2BIG; |
| 4672 | } |
| 4673 | return mel_us; |
| 4674 | } |
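| | /* |
| | * Example with assumed parameters: enabling U2 with u2_params.mel = |
| | * 2000 ns while U1 stays disabled gives mel_us = 2, far below the |
| | * 16-bit MAX_EXIT limit checked above. |
| | */ |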
| 4675 | |
| 4676 | /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ |
| 4677 | int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, |
| 4678 | struct usb_device *udev, enum usb3_link_state state) |
| 4679 | { |
| 4680 | struct xhci_hcd *xhci; |
| 4681 | u16 hub_encoded_timeout; |
| 4682 | int mel; |
| 4683 | int ret; |
| 4684 | |
| 4685 | xhci = hcd_to_xhci(hcd); |
| 4686 | /* The LPM timeout values are pretty host-controller specific, so don't |
| 4687 | * enable hub-initiated timeouts unless the vendor has provided |
| 4688 | * information about their timeout algorithm. |
| 4689 | */ |
| 4690 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || |
| 4691 | !xhci->devs[udev->slot_id]) |
| 4692 | return USB3_LPM_DISABLED; |
| 4693 | |
| 4694 | hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); |
| 4695 | mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); |
| 4696 | if (mel < 0) { |
| 4697 | /* Max Exit Latency is too big, disable LPM. */ |
| 4698 | hub_encoded_timeout = USB3_LPM_DISABLED; |
| 4699 | mel = 0; |
| 4700 | } |
| 4701 | |
| 4702 | ret = xhci_change_max_exit_latency(xhci, udev, mel); |
| 4703 | if (ret) |
| 4704 | return ret; |
| 4705 | return hub_encoded_timeout; |
| 4706 | } |
| 4707 | |
| 4708 | int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, |
| 4709 | struct usb_device *udev, enum usb3_link_state state) |
| 4710 | { |
| 4711 | struct xhci_hcd *xhci; |
| 4712 | u16 mel; |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4713 | |
| 4714 | xhci = hcd_to_xhci(hcd); |
| 4715 | if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || |
| 4716 | !xhci->devs[udev->slot_id]) |
| 4717 | return 0; |
| 4718 | |
| 4719 | mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); |
Saurabh Karajgaonkar | f1cda54 | 2015-08-04 14:04:09 +0000 | [diff] [blame] | 4720 | return xhci_change_max_exit_latency(xhci, udev, mel); |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4721 | } |
Sarah Sharp | b01bcbf | 2012-05-21 07:54:42 -0700 | [diff] [blame] | 4722 | #else /* CONFIG_PM */ |
| 4723 | |
Rafael J. Wysocki | ceb6c9c | 2014-11-29 23:47:05 +0100 | [diff] [blame] | 4724 | int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, |
| 4725 | struct usb_device *udev, int enable) |
| 4726 | { |
| 4727 | return 0; |
| 4728 | } |
| 4729 | |
| 4730 | int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) |
| 4731 | { |
| 4732 | return 0; |
| 4733 | } |
| 4734 | |
Sarah Sharp | b01bcbf | 2012-05-21 07:54:42 -0700 | [diff] [blame] | 4735 | int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, |
| 4736 | struct usb_device *udev, enum usb3_link_state state) |
| 4737 | { |
| 4738 | return USB3_LPM_DISABLED; |
| 4739 | } |
| 4740 | |
| 4741 | int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, |
| 4742 | struct usb_device *udev, enum usb3_link_state state) |
| 4743 | { |
| 4744 | return 0; |
| 4745 | } |
| 4746 | #endif /* CONFIG_PM */ |
| 4747 | |
Sarah Sharp | 3b3db02 | 2012-05-09 10:55:03 -0700 | [diff] [blame] | 4748 | /*-------------------------------------------------------------------------*/ |
| 4749 | |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4750 | /* Once a hub descriptor is fetched for a device, we need to update the xHC's |
| 4751 | * internal data structures for the device. |
| 4752 | */ |
| 4753 | int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, |
| 4754 | struct usb_tt *tt, gfp_t mem_flags) |
| 4755 | { |
| 4756 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 4757 | struct xhci_virt_device *vdev; |
| 4758 | struct xhci_command *config_cmd; |
| 4759 | struct xhci_input_control_ctx *ctrl_ctx; |
| 4760 | struct xhci_slot_ctx *slot_ctx; |
| 4761 | unsigned long flags; |
| 4762 | unsigned think_time; |
| 4763 | int ret; |
| 4764 | |
| 4765 | /* Ignore root hubs */ |
| 4766 | if (!hdev->parent) |
| 4767 | return 0; |
| 4768 | |
| 4769 | vdev = xhci->devs[hdev->slot_id]; |
| 4770 | if (!vdev) { |
| 4771 | xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); |
| 4772 | return -EINVAL; |
| 4773 | } |
Sarah Sharp | a1d78c1 | 2009-12-09 15:59:03 -0800 | [diff] [blame] | 4774 | config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4775 | if (!config_cmd) { |
| 4776 | xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); |
| 4777 | return -ENOMEM; |
| 4778 | } |
Lin Wang | 4daf9df | 2015-01-09 16:06:31 +0200 | [diff] [blame] | 4779 | ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); |
Sarah Sharp | 92f8e76 | 2013-04-23 17:11:14 -0700 | [diff] [blame] | 4780 | if (!ctrl_ctx) { |
| 4781 | xhci_warn(xhci, "%s: Could not get input context, bad type.\n", |
| 4782 | __func__); |
| 4783 | xhci_free_command(xhci, config_cmd); |
| 4784 | return -ENOMEM; |
| 4785 | } |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4786 | |
| 4787 | spin_lock_irqsave(&xhci->lock, flags); |
Sarah Sharp | 839c817 | 2011-09-02 11:05:47 -0700 | [diff] [blame] | 4788 | if (hdev->speed == USB_SPEED_HIGH && |
| 4789 | xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { |
| 4790 | xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); |
| 4791 | xhci_free_command(xhci, config_cmd); |
| 4792 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 4793 | return -ENOMEM; |
| 4794 | } |
| 4795 | |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4796 | xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 4797 | ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4798 | slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 4799 | slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); |
Chunfeng Yun | 096b110 | 2015-12-04 15:53:43 +0200 | [diff] [blame] | 4800 | /* |
| 4801 | * Refer to section 6.2.2: MTT should be 0 for a full speed hub, |
| 4802 | * but it may already be set to 1 when we set up an xHCI virtual |
| 4803 | * device, so clear it anyway. |
| 4804 | */ |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4805 | if (tt->multi) |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 4806 | slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); |
Chunfeng Yun | 096b110 | 2015-12-04 15:53:43 +0200 | [diff] [blame] | 4807 | else if (hdev->speed == USB_SPEED_FULL) |
| 4808 | slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT); |
| 4809 | |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4810 | if (xhci->hci_version > 0x95) { |
| 4811 | xhci_dbg(xhci, "xHCI version %x needs hub " |
| 4812 | "TT think time and number of ports\n", |
| 4813 | (unsigned int) xhci->hci_version); |
Matt Evans | 28ccd29 | 2011-03-29 13:40:46 +1100 | [diff] [blame] | 4814 | slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4815 | /* Set TT think time - convert from ns to FS bit times. |
| 4816 | * 0 = 8 FS bit times, 1 = 16 FS bit times, |
| 4817 | * 2 = 24 FS bit times, 3 = 32 FS bit times. |
Andiry Xu | 700b417 | 2011-05-05 18:14:05 +0800 | [diff] [blame] | 4818 | * |
| 4819 | * xHCI 1.0: this field shall be 0 if the device is not a |
| 4820 | * High-speed hub. |
Sarah Sharp | ac1c1b7 | 2009-09-04 10:53:20 -0700 | [diff] [blame] | 4821 | */ |
| 4822 | think_time = tt->think_time; |
| 4823 | if (think_time != 0) |
| 4824 | think_time = (think_time / 666) - 1; |
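| | /* |
| | * The 666 above is roughly 8 full-speed bit times in ns: one FS bit |
| | * time is 1 / 12 MHz, about 83.3 ns, so 8 bit times are about 666 ns. |
| | * A think_time of 666 ns therefore encodes to 0, 1332 ns to 1, etc. |
| | */ |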
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
					cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size. Why? */
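	/*
	 * MFINDEX counts 125 us microframes; shifting right by three
	 * converts it to a 1 ms frame number (8 microframes per frame).
	 */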
	return readl(&xhci->run_regs->microframe_index) >> 3;
}

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	struct device *dev = hcd->self.controller;
	int retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* support building packets from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* XHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	xhci = hcd_to_xhci(hcd);

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * The USB 2.0 roothub under xHCI has an integrated TT
		 * (rate matching hub), as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		if (xhci->sbrn == 0x31) {
			xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
			hcd->speed = HCD_USB31;
		}
		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		return 0;
	}

	mutex_init(&xhci->mutex);
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
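	/*
	 * hcc_params briefly holds hc_capbase here so that HC_VERSION()
	 * can extract the interface version from its upper 16 bits; it
	 * is overwritten with the real HCC parameters just below.
	 */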
	xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
	xhci_print_registers(xhci);

	xhci->quirks = quirks;

	get_quirks(dev, xhci);

	/* xHC controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer.  This quirk ignores such
	 * spurious events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/* Set dma_mask and coherent_dma_mask to 64 bits
	 * if the xHC supports 64-bit addressing. */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This avoids errors when a 32-bit USB controller is
		 * used on a 64-bit capable system.
		 */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
		  xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);
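
/*
 * Illustrative sketch (not part of this file): bus glue drivers pass
 * their quirk hook to xhci_gen_setup() from their hc_driver ->reset
 * callback.  With hypothetical names xhci_foo_quirks/xhci_foo_setup:
 *
 *	static void xhci_foo_quirks(struct device *dev, struct xhci_hcd *xhci)
 *	{
 *		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
 *	}
 *
 *	static int xhci_foo_setup(struct usb_hcd *hcd)
 *	{
 *		return xhci_gen_setup(hcd, xhci_foo_quirks);
 *	}
 */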

static const struct hc_driver xhci_hc_driver = {
	.description = "xhci-hcd",
	.product_desc = "xHCI Host Controller",
	.hcd_priv_size = sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq = xhci_irq,
	.flags = HCD_MEMORY | HCD_USB3 | HCD_SHARED,

	/*
	 * basic lifecycle operations
	 */
	.reset = NULL, /* set in xhci_init_driver() */
	.start = xhci_run,
	.stop = xhci_stop,
	.shutdown = xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue = xhci_urb_enqueue,
	.urb_dequeue = xhci_urb_dequeue,
	.alloc_dev = xhci_alloc_dev,
	.free_dev = xhci_free_dev,
	.alloc_streams = xhci_alloc_streams,
	.free_streams = xhci_free_streams,
	.add_endpoint = xhci_add_endpoint,
	.drop_endpoint = xhci_drop_endpoint,
	.endpoint_reset = xhci_endpoint_reset,
	.check_bandwidth = xhci_check_bandwidth,
	.reset_bandwidth = xhci_reset_bandwidth,
	.address_device = xhci_address_device,
	.enable_device = xhci_enable_device,
	.update_hub_device = xhci_update_hub_device,
	.reset_device = xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number = xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control = xhci_hub_control,
	.hub_status_data = xhci_hub_status_data,
	.bus_suspend = xhci_bus_suspend,
	.bus_resume = xhci_bus_resume,

	/*
	 * call back when device connected and addressed
	 */
	.update_device = xhci_update_device,
	.set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number = xhci_find_raw_port_number,
};

void xhci_init_driver(struct hc_driver *drv,
		const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
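
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * platform driver layers its own setup on the generic table roughly as:
 *
 *	static struct hc_driver xhci_foo_hc_driver;
 *
 *	static const struct xhci_driver_overrides xhci_foo_overrides = {
 *		.extra_priv_size = sizeof(struct xhci_foo_priv),
 *		.reset = xhci_foo_setup,
 *	};
 *
 *	xhci_init_driver(&xhci_foo_hc_driver, &xhci_foo_overrides);
 */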

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler-generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
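	/*
	 * Each check reads as "<number of 32-bit words> * 32 / 8", i.e.
	 * the expected size in bytes; a slot context, for example, is
	 * eight 32-bit words, so 8*32/8 == 32 bytes.
	 */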
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit xhci_hcd_fini(void) { }

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);