/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)	/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
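
/*
 * Illustrative use (a sketch that mirrors the call in xhci_halt() below):
 * wait up to XHCI_MAX_HALT_USEC microseconds for the HCHalted status bit
 * to latch after the run/stop bit has been cleared:
 *
 *	ret = handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */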

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

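	/*
	 * The reset wiped the host's port state, so clear the software
	 * suspend/resume bookkeeping for both roothubs to match.
	 */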
	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free all IRQs that were requested.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors this host supports:
	 * - HCS_MAX_INTRS is the maximum number of interrupters the host
	 *   can handle, taken from HCSPARAMS1.
	 * - num_online_cpus() allows one MSI-X vector per CPU core, plus
	 *   one extra vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

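/*
 * Interrupt setup policy: try MSI-X first, fall back to plain MSI if that
 * fails, and finally fall back to the shared legacy PCI interrupt if
 * neither can be enabled.
 */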
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		return 0;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is 0, we have MSI */
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

	/* fall back to the legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*-------------------------------------------------------------------------*/


#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
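	/*
	 * The interrupt moderation interval counts in 250ns increments per
	 * the xHCI spec, so the value 160 below allows at most one
	 * interrupt every 40 microseconds.
	 */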
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

700{
701 u64 val_64;
702
703 /* step 2: initialize command ring buffer */
704 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
705 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
706 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
707 xhci->cmd_ring->dequeue) &
708 (u64) ~CMD_RING_RSVD_BITS) |
709 xhci->cmd_ring->cycle_state;
710 xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
711 (long unsigned long) val_64);
712 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
713}
714
715/*
716 * The whole command ring must be cleared to zero when we suspend the host.
717 *
718 * The host doesn't save the command ring pointer in the suspend well, so we
719 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
720 * aligned, because of the reserved bits in the command ring dequeue pointer
721 * register. Therefore, we can't just set the dequeue pointer back in the
722 * middle of the ring (TRBs are 16-byte aligned).
723 */
724static void xhci_clear_command_ring(struct xhci_hcd *xhci)
725{
726 struct xhci_ring *ring;
727 struct xhci_segment *seg;
728
729 ring = xhci->cmd_ring;
730 seg = ring->deq_seg;
731 do {
Andiry Xu158886c2011-11-30 16:37:41 +0800732 memset(seg->trbs, 0,
733 sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
734 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
735 cpu_to_le32(~TRB_CYCLE);
Sarah Sharp89821322010-11-12 11:59:31 -0800736 seg = seg->next;
737 } while (seg != ring->deq_seg);
738
739 /* Reset the software enqueue and dequeue pointers */
740 ring->deq_seg = ring->first_seg;
741 ring->dequeue = ring->first_seg->trbs;
742 ring->enq_seg = ring->deq_seg;
743 ring->enqueue = ring->dequeue;
744
Andiry Xub008df62012-03-05 17:49:34 +0800745 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
Sarah Sharp89821322010-11-12 11:59:31 -0800746 /*
747 * Ring is now zeroed, so the HW should look for change of ownership
748 * when the cycle bit is set to 1.
749 */
750 ring->cycle_state = 1;
751
752 /*
753 * Reset the hardware dequeue pointer.
754 * Yes, this will need to be re-written after resume, but we're paranoid
755 * and want to make sure the hardware doesn't access bogus memory
756 * because, say, the BIOS or an SMI started the host without changing
757 * the command ring pointers.
758 */
759 xhci_set_cmd_ring_deq(xhci);
760}
761
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped: port suspend is assumed to have stopped the endpoints */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}

/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

done:
	if (retval == 0) {
		usb_hcd_resume_root_hub(hcd);
		usb_hcd_resume_root_hub(xhci->shared_hcd);
	}
	return retval;
}
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
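
/*
 * Worked examples of the formula above: ep 0x81 (IN, epnum 1) gives
 * index = (1 * 2) + 1 - 1 = 2; ep 0x02 (OUT, epnum 2) gives
 * index = (2 * 2) + 0 - 1 = 3; the default control ep 0 gives index 0.
 */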

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
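
/*
 * Continuing the example: ep 0x81 has endpoint index 2, so its
 * control-context flag is 1 << (2 + 1) = 0b1000, which matches the
 * added_ctxs example in xhci_last_valid_endpoint() below.
 */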

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
					"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and "
					"virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				  size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
1306int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1307{
Sarah Sharpae636742009-04-29 19:02:31 -07001308 unsigned long flags;
Andiry Xu8e51adc2010-07-22 15:23:31 -07001309 int ret, i;
Sarah Sharpe34b2fb2009-09-28 17:21:37 -07001310 u32 temp;
Sarah Sharpae636742009-04-29 19:02:31 -07001311 struct xhci_hcd *xhci;
Andiry Xu8e51adc2010-07-22 15:23:31 -07001312 struct urb_priv *urb_priv;
Sarah Sharpae636742009-04-29 19:02:31 -07001313 struct xhci_td *td;
1314 unsigned int ep_index;
1315 struct xhci_ring *ep_ring;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07001316 struct xhci_virt_ep *ep;
Sarah Sharpae636742009-04-29 19:02:31 -07001317
1318 xhci = hcd_to_xhci(hcd);
1319 spin_lock_irqsave(&xhci->lock, flags);
1320 /* Make sure the URB hasn't completed or been unlinked already */
1321 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1322 if (ret || !urb->hcpriv)
1323 goto done;
Sarah Sharpe34b2fb2009-09-28 17:21:37 -07001324 temp = xhci_readl(xhci, &xhci->op_regs->status);
Sarah Sharpc6cc27c2011-03-11 10:20:58 -08001325 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
Sarah Sharpe34b2fb2009-09-28 17:21:37 -07001326 xhci_dbg(xhci, "HW died, freeing TD.\n");
Andiry Xu8e51adc2010-07-22 15:23:31 -07001327 urb_priv = urb->hcpriv;
Sarah Sharp585df1d2011-08-02 15:43:40 -07001328 for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
1329 td = urb_priv->td[i];
1330 if (!list_empty(&td->td_list))
1331 list_del_init(&td->td_list);
1332 if (!list_empty(&td->cancelled_td_list))
1333 list_del_init(&td->cancelled_td_list);
1334 }
Sarah Sharpe34b2fb2009-09-28 17:21:37 -07001335
1336 usb_hcd_unlink_urb_from_ep(hcd, urb);
1337 spin_unlock_irqrestore(&xhci->lock, flags);
Sarah Sharp214f76f2010-10-26 11:22:02 -07001338 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
Andiry Xu8e51adc2010-07-22 15:23:31 -07001339 xhci_urb_free_priv(xhci, urb_priv);
Sarah Sharpe34b2fb2009-09-28 17:21:37 -07001340 return ret;
1341 }
Sarah Sharp7bd89b42011-07-01 13:35:40 -07001342 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
1343 (xhci->xhc_state & XHCI_STATE_HALTED)) {
Sarah Sharp6f5165c2009-10-27 10:57:01 -07001344 xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
1345 "non-responsive xHCI host.\n",
1346 urb->ep->desc.bEndpointAddress, urb);
1347 /* Let the stop endpoint command watchdog timer (which set this
1348 * state) finish cleaning up the endpoint TD lists. We must
1349 * have caught it in the middle of dropping a lock and giving
1350 * back an URB.
1351 */
1352 goto done;
1353 }
Sarah Sharpae636742009-04-29 19:02:31 -07001354
Sarah Sharpae636742009-04-29 19:02:31 -07001355 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07001356 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
Sarah Sharpe9df17e2010-04-02 15:34:43 -07001357 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1358 if (!ep_ring) {
1359 ret = -EINVAL;
1360 goto done;
1361 }
1362
Andiry Xu8e51adc2010-07-22 15:23:31 -07001363 urb_priv = urb->hcpriv;
Sarah Sharp79688ac2011-12-19 16:56:04 -08001364 i = urb_priv->td_cnt;
1365 if (i < urb_priv->length)
1366 xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
1367 "starting at offset 0x%llx\n",
1368 urb, urb->dev->devpath,
1369 urb->ep->desc.bEndpointAddress,
1370 (unsigned long long) xhci_trb_virt_to_dma(
1371 urb_priv->td[i]->start_seg,
1372 urb_priv->td[i]->first_trb));
Andiry Xu8e51adc2010-07-22 15:23:31 -07001373
Sarah Sharp79688ac2011-12-19 16:56:04 -08001374 for (; i < urb_priv->length; i++) {
Andiry Xu8e51adc2010-07-22 15:23:31 -07001375 td = urb_priv->td[i];
1376 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1377 }
1378
Sarah Sharpae636742009-04-29 19:02:31 -07001379 /* Queue a stop endpoint command, but only if this is
1380 * the first cancellation to be handled.
1381 */
Sarah Sharp678539c2009-10-27 10:55:52 -07001382 if (!(ep->ep_state & EP_HALT_PENDING)) {
1383 ep->ep_state |= EP_HALT_PENDING;
Sarah Sharp6f5165c2009-10-27 10:57:01 -07001384 ep->stop_cmds_pending++;
1385 ep->stop_cmd_timer.expires = jiffies +
1386 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1387 add_timer(&ep->stop_cmd_timer);
Andiry Xube88fe42010-10-14 07:22:57 -07001388 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
Sarah Sharp23e3be12009-04-29 19:05:20 -07001389 xhci_ring_cmd_db(xhci);
Sarah Sharpae636742009-04-29 19:02:31 -07001390 }
1391done:
1392 spin_unlock_irqrestore(&xhci->lock, flags);
1393 return ret;
Sarah Sharpd0e96f5a2009-04-27 19:58:01 -07001394}
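
/*
 * Editor's illustration, not part of the driver: a minimal sketch of the
 * class-driver timeout path that ends up in xhci_urb_dequeue() above.  The
 * helper name and timeout policy are hypothetical; usb_unlink_urb() is the
 * real USB core entry point that invokes the HCD's urb_dequeue op.
 */
static void example_cancel_urb_on_timeout(struct urb *urb)
{
	/*
	 * Asynchronous cancel, safe in atomic context.  The URB is given
	 * back later with -ECONNRESET, after the Stop Endpoint command and
	 * the cancellation bookkeeping above have run.
	 */
	usb_unlink_urb(urb);
}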
1395
Sarah Sharpf94e01862009-04-27 19:58:38 -07001396/* Drop an endpoint from a new bandwidth configuration for this device.
1397 * Only one call to this function is allowed per endpoint before
1398 * check_bandwidth() or reset_bandwidth() must be called.
1399 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1400 * add the endpoint to the schedule with possibly new parameters denoted by a
1401 * different endpoint descriptor in usb_host_endpoint.
1402 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1403 * not allowed.
Sarah Sharpf88ba782009-05-14 11:44:22 -07001404 *
1405 * The USB core will not allow URBs to be queued to an endpoint that is being
1406 * disabled, so there's no need for mutual exclusion to protect
1407 * the xhci->devs[slot_id] structure.
Sarah Sharpf94e01862009-04-27 19:58:38 -07001408 */
1409int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1410 struct usb_host_endpoint *ep)
1411{
Sarah Sharpf94e01862009-04-27 19:58:38 -07001412 struct xhci_hcd *xhci;
John Yound115b042009-07-27 12:05:15 -07001413 struct xhci_container_ctx *in_ctx, *out_ctx;
1414 struct xhci_input_control_ctx *ctrl_ctx;
1415 struct xhci_slot_ctx *slot_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001416 unsigned int last_ctx;
1417 unsigned int ep_index;
1418 struct xhci_ep_ctx *ep_ctx;
1419 u32 drop_flag;
1420 u32 new_add_flags, new_drop_flags, new_slot_info;
1421 int ret;
1422
Andiry Xu64927732010-10-14 07:22:45 -07001423 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001424 if (ret <= 0)
1425 return ret;
1426 xhci = hcd_to_xhci(hcd);
Sarah Sharpfe6c6c12011-05-23 16:41:17 -07001427 if (xhci->xhc_state & XHCI_STATE_DYING)
1428 return -ENODEV;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001429
Sarah Sharpfe6c6c12011-05-23 16:41:17 -07001430 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001431 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1432 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1433 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1434 __func__, drop_flag);
1435 return 0;
1436 }
1437
Sarah Sharpf94e01862009-04-27 19:58:38 -07001438 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
John Yound115b042009-07-27 12:05:15 -07001439 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1440 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001441 ep_index = xhci_get_endpoint_index(&ep->desc);
John Yound115b042009-07-27 12:05:15 -07001442 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001443 /* If the HC already knows the endpoint is disabled,
1444 * or the HCD has noted it is disabled, ignore this request
1445 */
Matt Evansf5960b62011-06-01 10:22:55 +10001446 if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1447 cpu_to_le32(EP_STATE_DISABLED)) ||
Matt Evans28ccd292011-03-29 13:40:46 +11001448 le32_to_cpu(ctrl_ctx->drop_flags) &
1449 xhci_get_endpoint_flag(&ep->desc)) {
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07001450 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1451 __func__, ep);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001452 return 0;
1453 }
1454
Matt Evans28ccd292011-03-29 13:40:46 +11001455 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1456 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001457
Matt Evans28ccd292011-03-29 13:40:46 +11001458 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1459 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001460
Matt Evans28ccd292011-03-29 13:40:46 +11001461 last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
John Yound115b042009-07-27 12:05:15 -07001462 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001463 /* Update the last valid endpoint context, if we deleted the last one */
Matt Evans28ccd292011-03-29 13:40:46 +11001464 if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
1465 LAST_CTX(last_ctx)) {
1466 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1467 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001468 }
Matt Evans28ccd292011-03-29 13:40:46 +11001469 new_slot_info = le32_to_cpu(slot_ctx->dev_info);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001470
1471 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1472
Sarah Sharpf94e01862009-04-27 19:58:38 -07001473 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1474 (unsigned int) ep->desc.bEndpointAddress,
1475 udev->slot_id,
1476 (unsigned int) new_drop_flags,
1477 (unsigned int) new_add_flags,
1478 (unsigned int) new_slot_info);
1479 return 0;
1480}
1481
1482/* Add an endpoint to a new possible bandwidth configuration for this device.
1483 * Only one call to this function is allowed per endpoint before
1484 * check_bandwidth() or reset_bandwidth() must be called.
1485 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1486 * add the endpoint to the schedule with possibly new parameters denoted by a
1487 * different endpoint descriptor in usb_host_endpoint.
1488 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1489 * not allowed.
Sarah Sharpf88ba782009-05-14 11:44:22 -07001490 *
1491 * The USB core will not allow URBs to be queued to an endpoint until the
1492 * configuration or alt setting is installed in the device, so there's no need
1493 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
Sarah Sharpf94e01862009-04-27 19:58:38 -07001494 */
1495int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1496 struct usb_host_endpoint *ep)
1497{
Sarah Sharpf94e01862009-04-27 19:58:38 -07001498 struct xhci_hcd *xhci;
John Yound115b042009-07-27 12:05:15 -07001499 struct xhci_container_ctx *in_ctx, *out_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001500 unsigned int ep_index;
1501 struct xhci_ep_ctx *ep_ctx;
John Yound115b042009-07-27 12:05:15 -07001502 struct xhci_slot_ctx *slot_ctx;
1503 struct xhci_input_control_ctx *ctrl_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001504 u32 added_ctxs;
1505 unsigned int last_ctx;
1506 u32 new_add_flags, new_drop_flags, new_slot_info;
Sarah Sharpfa75ac32011-06-05 23:10:04 -07001507 struct xhci_virt_device *virt_dev;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001508 int ret = 0;
1509
Andiry Xu64927732010-10-14 07:22:45 -07001510 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
Sarah Sharpa1587d92009-07-27 12:03:15 -07001511 if (ret <= 0) {
1512 /* So we won't queue a reset ep command for a root hub */
1513 ep->hcpriv = NULL;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001514 return ret;
Sarah Sharpa1587d92009-07-27 12:03:15 -07001515 }
Sarah Sharpf94e01862009-04-27 19:58:38 -07001516 xhci = hcd_to_xhci(hcd);
Sarah Sharpfe6c6c12011-05-23 16:41:17 -07001517 if (xhci->xhc_state & XHCI_STATE_DYING)
1518 return -ENODEV;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001519
1520 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1521 last_ctx = xhci_last_valid_endpoint(added_ctxs);
1522 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
 1523		/* FIXME when we have to issue an evaluate context command to
1524 * deal with ep0 max packet size changing once we get the
1525 * descriptors
1526 */
1527 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1528 __func__, added_ctxs);
1529 return 0;
1530 }
1531
Sarah Sharpfa75ac32011-06-05 23:10:04 -07001532 virt_dev = xhci->devs[udev->slot_id];
1533 in_ctx = virt_dev->in_ctx;
1534 out_ctx = virt_dev->out_ctx;
John Yound115b042009-07-27 12:05:15 -07001535 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001536 ep_index = xhci_get_endpoint_index(&ep->desc);
John Yound115b042009-07-27 12:05:15 -07001537 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
Sarah Sharpfa75ac32011-06-05 23:10:04 -07001538
1539 /* If this endpoint is already in use, and the upper layers are trying
1540 * to add it again without dropping it, reject the addition.
1541 */
1542 if (virt_dev->eps[ep_index].ring &&
1543 !(le32_to_cpu(ctrl_ctx->drop_flags) &
1544 xhci_get_endpoint_flag(&ep->desc))) {
1545 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1546 "without dropping it.\n",
1547 (unsigned int) ep->desc.bEndpointAddress);
1548 return -EINVAL;
1549 }
1550
Sarah Sharpf94e01862009-04-27 19:58:38 -07001551 /* If the HCD has already noted the endpoint is enabled,
1552 * ignore this request.
1553 */
Matt Evans28ccd292011-03-29 13:40:46 +11001554 if (le32_to_cpu(ctrl_ctx->add_flags) &
1555 xhci_get_endpoint_flag(&ep->desc)) {
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07001556 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1557 __func__, ep);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001558 return 0;
1559 }
1560
Sarah Sharpf88ba782009-05-14 11:44:22 -07001561 /*
1562 * Configuration and alternate setting changes must be done in
 1563	 * process context, not interrupt context (or so the documentation
 1564	 * for usb_set_interface() and usb_set_configuration() claims).
1565 */
Sarah Sharpfa75ac32011-06-05 23:10:04 -07001566 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
Sarah Sharpf94e01862009-04-27 19:58:38 -07001567 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1568 __func__, ep->desc.bEndpointAddress);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001569 return -ENOMEM;
1570 }
1571
Matt Evans28ccd292011-03-29 13:40:46 +11001572 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1573 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001574
1575 /* If xhci_endpoint_disable() was called for this endpoint, but the
1576 * xHC hasn't been notified yet through the check_bandwidth() call,
1577 * this re-adds a new state for the endpoint from the new endpoint
1578 * descriptors. We must drop and re-add this endpoint, so we leave the
1579 * drop flags alone.
1580 */
Matt Evans28ccd292011-03-29 13:40:46 +11001581 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001582
John Yound115b042009-07-27 12:05:15 -07001583 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001584 /* Update the last valid endpoint context, if we just added one past */
Matt Evans28ccd292011-03-29 13:40:46 +11001585 if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
1586 LAST_CTX(last_ctx)) {
1587 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1588 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001589 }
Matt Evans28ccd292011-03-29 13:40:46 +11001590 new_slot_info = le32_to_cpu(slot_ctx->dev_info);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001591
Sarah Sharpa1587d92009-07-27 12:03:15 -07001592 /* Store the usb_device pointer for later use */
1593 ep->hcpriv = udev;
1594
Sarah Sharpf94e01862009-04-27 19:58:38 -07001595 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1596 (unsigned int) ep->desc.bEndpointAddress,
1597 udev->slot_id,
1598 (unsigned int) new_drop_flags,
1599 (unsigned int) new_add_flags,
1600 (unsigned int) new_slot_info);
1601 return 0;
1602}
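
/*
 * Editor's illustration, not part of the driver: the drop-then-add ordering
 * contract described above, roughly as the USB core exercises it when an
 * alternate setting changes an endpoint's descriptor.  Hypothetical helper;
 * error unwinding via xhci_reset_bandwidth() is left out for brevity.
 */
static int example_change_endpoint(struct usb_hcd *hcd,
		struct usb_device *udev,
		struct usb_host_endpoint *old_ep,
		struct usb_host_endpoint *new_ep)
{
	int ret;

	/* Drop first; add-then-drop for the same endpoint is not allowed. */
	ret = xhci_drop_endpoint(hcd, udev, old_ep);
	if (ret)
		return ret;
	ret = xhci_add_endpoint(hcd, udev, new_ep);
	if (ret)
		return ret;
	/* Nothing is issued to the hardware until check_bandwidth(). */
	return xhci_check_bandwidth(hcd, udev);
}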
1603
John Yound115b042009-07-27 12:05:15 -07001604static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
Sarah Sharpf94e01862009-04-27 19:58:38 -07001605{
John Yound115b042009-07-27 12:05:15 -07001606 struct xhci_input_control_ctx *ctrl_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001607 struct xhci_ep_ctx *ep_ctx;
John Yound115b042009-07-27 12:05:15 -07001608 struct xhci_slot_ctx *slot_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001609 int i;
1610
1611 /* When a device's add flag and drop flag are zero, any subsequent
1612 * configure endpoint command will leave that endpoint's state
1613 * untouched. Make sure we don't leave any old state in the input
1614 * endpoint contexts.
1615 */
John Yound115b042009-07-27 12:05:15 -07001616 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1617 ctrl_ctx->drop_flags = 0;
1618 ctrl_ctx->add_flags = 0;
1619 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11001620 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001621 /* Endpoint 0 is always valid */
Matt Evans28ccd292011-03-29 13:40:46 +11001622 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001623 for (i = 1; i < 31; ++i) {
John Yound115b042009-07-27 12:05:15 -07001624 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001625 ep_ctx->ep_info = 0;
1626 ep_ctx->ep_info2 = 0;
Sarah Sharp8e595a52009-07-27 12:03:31 -07001627 ep_ctx->deq = 0;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001628 ep_ctx->tx_info = 0;
1629 }
1630}
1631
Sarah Sharpf2217e82009-08-07 14:04:43 -07001632static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
Sarah Sharp00161f72011-04-28 12:23:23 -07001633 struct usb_device *udev, u32 *cmd_status)
Sarah Sharpf2217e82009-08-07 14:04:43 -07001634{
1635 int ret;
1636
Sarah Sharp913a8a32009-09-04 10:53:13 -07001637 switch (*cmd_status) {
Sarah Sharpf2217e82009-08-07 14:04:43 -07001638 case COMP_ENOMEM:
1639 dev_warn(&udev->dev, "Not enough host controller resources "
1640 "for new device state.\n");
1641 ret = -ENOMEM;
1642 /* FIXME: can we allocate more resources for the HC? */
1643 break;
1644 case COMP_BW_ERR:
Hans de Goede71d85722012-01-04 23:29:18 +01001645 case COMP_2ND_BW_ERR:
Sarah Sharpf2217e82009-08-07 14:04:43 -07001646 dev_warn(&udev->dev, "Not enough bandwidth "
1647 "for new device state.\n");
1648 ret = -ENOSPC;
1649 /* FIXME: can we go back to the old state? */
1650 break;
1651 case COMP_TRB_ERR:
1652 /* the HCD set up something wrong */
1653 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1654 "add flag = 1, "
1655 "and endpoint is not disabled.\n");
1656 ret = -EINVAL;
1657 break;
Alex Hef6ba6fe2011-06-08 18:34:06 +08001658 case COMP_DEV_ERR:
1659 dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
1660 "configure command.\n");
1661 ret = -ENODEV;
1662 break;
Sarah Sharpf2217e82009-08-07 14:04:43 -07001663 case COMP_SUCCESS:
1664 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1665 ret = 0;
1666 break;
1667 default:
1668 xhci_err(xhci, "ERROR: unexpected command completion "
Sarah Sharp913a8a32009-09-04 10:53:13 -07001669 "code 0x%x.\n", *cmd_status);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001670 ret = -EINVAL;
1671 break;
1672 }
1673 return ret;
1674}
1675
1676static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
Sarah Sharp00161f72011-04-28 12:23:23 -07001677 struct usb_device *udev, u32 *cmd_status)
Sarah Sharpf2217e82009-08-07 14:04:43 -07001678{
1679 int ret;
Sarah Sharp913a8a32009-09-04 10:53:13 -07001680 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
Sarah Sharpf2217e82009-08-07 14:04:43 -07001681
Sarah Sharp913a8a32009-09-04 10:53:13 -07001682 switch (*cmd_status) {
Sarah Sharpf2217e82009-08-07 14:04:43 -07001683 case COMP_EINVAL:
1684 dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1685 "context command.\n");
1686 ret = -EINVAL;
1687 break;
1688 case COMP_EBADSLT:
 1689		dev_warn(&udev->dev, "WARN: slot not enabled for "
 1690				"evaluate context command.\n");
		ret = -EINVAL;
		break;
1691 case COMP_CTX_STATE:
1692 dev_warn(&udev->dev, "WARN: invalid context state for "
1693 "evaluate context command.\n");
1694 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1695 ret = -EINVAL;
1696 break;
Alex Hef6ba6fe2011-06-08 18:34:06 +08001697 case COMP_DEV_ERR:
1698 dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
1699 "context command.\n");
1700 ret = -ENODEV;
1701 break;
Alex He1bb73a82011-05-05 18:14:12 +08001702 case COMP_MEL_ERR:
1703 /* Max Exit Latency too large error */
1704 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1705 ret = -EINVAL;
1706 break;
Sarah Sharpf2217e82009-08-07 14:04:43 -07001707 case COMP_SUCCESS:
1708 dev_dbg(&udev->dev, "Successful evaluate context command\n");
1709 ret = 0;
1710 break;
1711 default:
1712 xhci_err(xhci, "ERROR: unexpected command completion "
Sarah Sharp913a8a32009-09-04 10:53:13 -07001713 "code 0x%x.\n", *cmd_status);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001714 ret = -EINVAL;
1715 break;
1716 }
1717 return ret;
1718}
1719
Sarah Sharp2cf95c12011-05-11 16:14:58 -07001720static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1721 struct xhci_container_ctx *in_ctx)
1722{
1723 struct xhci_input_control_ctx *ctrl_ctx;
1724 u32 valid_add_flags;
1725 u32 valid_drop_flags;
1726
1727 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1728 /* Ignore the slot flag (bit 0), and the default control endpoint flag
1729 * (bit 1). The default control endpoint is added during the Address
1730 * Device command and is never removed until the slot is disabled.
1731 */
 1732	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
 1733	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1734
1735 /* Use hweight32 to count the number of ones in the add flags, or
1736 * number of endpoints added. Don't count endpoints that are changed
1737 * (both added and dropped).
1738 */
1739 return hweight32(valid_add_flags) -
1740 hweight32(valid_add_flags & valid_drop_flags);
1741}
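
/*
 * Editor's worked example with hypothetical flag values: add_flags = 0x19
 * (the slot flag plus endpoint context flags at bits 3 and 4) and
 * drop_flags = 0x08 (bit 3).  Then valid_add_flags = 0x6 and
 * valid_drop_flags = 0x2, so the result is
 * hweight32(0x6) - hweight32(0x6 & 0x2) = 2 - 1 = 1: the bit-3 endpoint is
 * merely changed (both added and dropped), and only the bit-4 endpoint is
 * truly new.
 */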
1742
1743static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1744 struct xhci_container_ctx *in_ctx)
1745{
1746 struct xhci_input_control_ctx *ctrl_ctx;
1747 u32 valid_add_flags;
1748 u32 valid_drop_flags;
1749
1750 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
 1751	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
 1752	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1753
1754 return hweight32(valid_drop_flags) -
1755 hweight32(valid_add_flags & valid_drop_flags);
1756}
1757
1758/*
1759 * We need to reserve the new number of endpoints before the configure endpoint
1760 * command completes. We can't subtract the dropped endpoints from the number
1761 * of active endpoints until the command completes because we can oversubscribe
1762 * the host in this case:
1763 *
1764 * - the first configure endpoint command drops more endpoints than it adds
1765 * - a second configure endpoint command that adds more endpoints is queued
1766 * - the first configure endpoint command fails, so the config is unchanged
 1767 * - the second command may succeed, even though there aren't enough resources
1768 *
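 * Editor's example with made-up numbers: with a limit of 32 and 30 active,
 * a first command that drops 8 and adds 2 leaves the count at 32 until it
 * completes.  If a second command adding 6 were instead charged against
 * 30 - 8 + 2 = 24, it would be accepted; should the first command then
 * fail, the host is back at 30 active and the extra 6 oversubscribe it.
 *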
1769 * Must be called with xhci->lock held.
1770 */
1771static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1772 struct xhci_container_ctx *in_ctx)
1773{
1774 u32 added_eps;
1775
1776 added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1777 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1778 xhci_dbg(xhci, "Not enough ep ctxs: "
1779 "%u active, need to add %u, limit is %u.\n",
1780 xhci->num_active_eps, added_eps,
1781 xhci->limit_active_eps);
1782 return -ENOMEM;
1783 }
1784 xhci->num_active_eps += added_eps;
1785 xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
1786 xhci->num_active_eps);
1787 return 0;
1788}
1789
1790/*
 1791 * The configure endpoint command was failed by the xHC for some other reason,
 1792 * so we need to revert the resources that the failed configuration would have used.
1793 *
1794 * Must be called with xhci->lock held.
1795 */
1796static void xhci_free_host_resources(struct xhci_hcd *xhci,
1797 struct xhci_container_ctx *in_ctx)
1798{
1799 u32 num_failed_eps;
1800
1801 num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1802 xhci->num_active_eps -= num_failed_eps;
1803 xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
1804 num_failed_eps,
1805 xhci->num_active_eps);
1806}
1807
1808/*
1809 * Now that the command has completed, clean up the active endpoint count by
1810 * subtracting out the endpoints that were dropped (but not changed).
1811 *
1812 * Must be called with xhci->lock held.
1813 */
1814static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1815 struct xhci_container_ctx *in_ctx)
1816{
1817 u32 num_dropped_eps;
1818
1819 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
1820 xhci->num_active_eps -= num_dropped_eps;
1821 if (num_dropped_eps)
1822 xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
1823 num_dropped_eps,
1824 xhci->num_active_eps);
1825}
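
/*
 * Editor's sketch of the protocol the three helpers above implement; the
 * real sequence lives in xhci_configure_endpoint() below:
 *
 *	spin_lock_irqsave(&xhci->lock, flags);
 *	ret = xhci_reserve_host_resources(xhci, in_ctx);   (charge the adds)
 *	... queue the command, drop the lock, wait for completion ...
 *	spin_lock_irqsave(&xhci->lock, flags);
 *	if (the command failed)
 *		xhci_free_host_resources(xhci, in_ctx);    (undo the adds)
 *	else
 *		xhci_finish_resource_reservation(xhci, in_ctx);  (settle the drops)
 */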
1826
Sarah Sharpc29eea62011-09-02 11:05:52 -07001827unsigned int xhci_get_block_size(struct usb_device *udev)
1828{
1829 switch (udev->speed) {
1830 case USB_SPEED_LOW:
1831 case USB_SPEED_FULL:
1832 return FS_BLOCK;
1833 case USB_SPEED_HIGH:
1834 return HS_BLOCK;
1835 case USB_SPEED_SUPER:
1836 return SS_BLOCK;
1837 case USB_SPEED_UNKNOWN:
1838 case USB_SPEED_WIRELESS:
1839 default:
1840 /* Should never happen */
1841 return 1;
1842 }
1843}
1844
1845unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
1846{
1847 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
1848 return LS_OVERHEAD;
1849 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
1850 return FS_OVERHEAD;
1851 return HS_OVERHEAD;
1852}
1853
1854/* If we are changing a LS/FS device under a HS hub,
1855 * make sure (if we are activating a new TT) that the HS bus has enough
1856 * bandwidth for this new TT.
1857 */
1858static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
1859 struct xhci_virt_device *virt_dev,
1860 int old_active_eps)
1861{
1862 struct xhci_interval_bw_table *bw_table;
1863 struct xhci_tt_bw_info *tt_info;
1864
1865 /* Find the bandwidth table for the root port this TT is attached to. */
1866 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
1867 tt_info = virt_dev->tt_info;
1868 /* If this TT already had active endpoints, the bandwidth for this TT
1869 * has already been added. Removing all periodic endpoints (and thus
 1870	 * making the TT inactive) will only decrease the bandwidth used.
1871 */
1872 if (old_active_eps)
1873 return 0;
1874 if (old_active_eps == 0 && tt_info->active_eps != 0) {
1875 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
1876 return -ENOMEM;
1877 return 0;
1878 }
1879 /* Not sure why we would have no new active endpoints...
1880 *
1881 * Maybe because of an Evaluate Context change for a hub update or a
1882 * control endpoint 0 max packet size change?
1883 * FIXME: skip the bandwidth calculation in that case.
1884 */
1885 return 0;
1886}
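
/*
 * Editor's example with made-up constants: if HS_BW_LIMIT were 7000 blocks,
 * the root port's bw_used 6900, and TT_HS_OVERHEAD 200, activating a brand
 * new TT would fail the check above (6900 + 200 > 7000), while a TT that
 * already has active endpoints costs nothing further.
 */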
1887
Sarah Sharp2b698992011-09-13 16:41:13 -07001888static int xhci_check_ss_bw(struct xhci_hcd *xhci,
1889 struct xhci_virt_device *virt_dev)
1890{
1891 unsigned int bw_reserved;
1892
1893 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
1894 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
1895 return -ENOMEM;
1896
1897 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
1898 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
1899 return -ENOMEM;
1900
1901 return 0;
1902}
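
/*
 * Editor's example with made-up constants: if SS_BW_RESERVED were 10
 * (percent) and SS_BW_LIMIT_IN were 5000 blocks, bw_reserved would be
 * DIV_ROUND_UP(10 * 5000, 100) = 500, so the IN direction is rejected once
 * its periodic load exceeds 4500 blocks.
 */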
1903
Sarah Sharpc29eea62011-09-02 11:05:52 -07001904/*
1905 * This algorithm is a very conservative estimate of the worst-case scheduling
1906 * scenario for any one interval. The hardware dynamically schedules the
1907 * packets, so we can't tell which microframe could be the limiting factor in
1908 * the bandwidth scheduling. This only takes into account periodic endpoints.
1909 *
 1910 * Obviously, we can't solve an NP-complete problem to find the minimum worst
1911 * case scenario. Instead, we come up with an estimate that is no less than
1912 * the worst case bandwidth used for any one microframe, but may be an
1913 * over-estimate.
1914 *
1915 * We walk the requirements for each endpoint by interval, starting with the
1916 * smallest interval, and place packets in the schedule where there is only one
1917 * possible way to schedule packets for that interval. In order to simplify
1918 * this algorithm, we record the largest max packet size for each interval, and
1919 * assume all packets will be that size.
1920 *
1921 * For interval 0, we obviously must schedule all packets for each interval.
1922 * The bandwidth for interval 0 is just the amount of data to be transmitted
1923 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
1924 * the number of packets).
1925 *
1926 * For interval 1, we have two possible microframes to schedule those packets
1927 * in. For this algorithm, if we can schedule the same number of packets for
1928 * each possible scheduling opportunity (each microframe), we will do so. The
1929 * remaining number of packets will be saved to be transmitted in the gaps in
1930 * the next interval's scheduling sequence.
1931 *
1932 * As we move those remaining packets to be scheduled with interval 2 packets,
1933 * we have to double the number of remaining packets to transmit. This is
1934 * because the intervals are actually powers of 2, and we would be transmitting
1935 * the previous interval's packets twice in this interval. We also have to be
1936 * sure that when we look at the largest max packet size for this interval, we
1937 * also look at the largest max packet size for the remaining packets and take
1938 * the greater of the two.
1939 *
1940 * The algorithm continues to evenly distribute packets in each scheduling
1941 * opportunity, and push the remaining packets out, until we get to the last
1942 * interval. Then those packets and their associated overhead are just added
1943 * to the bandwidth used.
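 *
 * Editor's example with made-up numbers: if 3 packets are left over from
 * interval 1, they are counted as 2 * 3 = 6 packets when merged into
 * interval 2's schedule, and the per-packet size and overhead charged from
 * then on are the larger of the two intervals' values.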
Sarah Sharp2e279802011-09-02 11:05:50 -07001944 */
1945static int xhci_check_bw_table(struct xhci_hcd *xhci,
1946 struct xhci_virt_device *virt_dev,
1947 int old_active_eps)
1948{
Sarah Sharpc29eea62011-09-02 11:05:52 -07001949 unsigned int bw_reserved;
1950 unsigned int max_bandwidth;
1951 unsigned int bw_used;
1952 unsigned int block_size;
1953 struct xhci_interval_bw_table *bw_table;
1954 unsigned int packet_size = 0;
1955 unsigned int overhead = 0;
1956 unsigned int packets_transmitted = 0;
1957 unsigned int packets_remaining = 0;
1958 unsigned int i;
1959
Sarah Sharp2b698992011-09-13 16:41:13 -07001960 if (virt_dev->udev->speed == USB_SPEED_SUPER)
1961 return xhci_check_ss_bw(xhci, virt_dev);
1962
Sarah Sharpc29eea62011-09-02 11:05:52 -07001963 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
1964 max_bandwidth = HS_BW_LIMIT;
1965 /* Convert percent of bus BW reserved to blocks reserved */
1966 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
1967 } else {
1968 max_bandwidth = FS_BW_LIMIT;
1969 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
1970 }
1971
1972 bw_table = virt_dev->bw_table;
1973 /* We need to translate the max packet size and max ESIT payloads into
1974 * the units the hardware uses.
1975 */
1976 block_size = xhci_get_block_size(virt_dev->udev);
1977
1978 /* If we are manipulating a LS/FS device under a HS hub, double check
 1979	 * that the HS bus has enough bandwidth if we are activating a new TT.
1980 */
1981 if (virt_dev->tt_info) {
1982 xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
1983 virt_dev->real_port);
1984 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
1985 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
1986 "newly activated TT.\n");
1987 return -ENOMEM;
1988 }
1989 xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
1990 virt_dev->tt_info->slot_id,
1991 virt_dev->tt_info->ttport);
1992 } else {
1993 xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
1994 virt_dev->real_port);
1995 }
1996
1997 /* Add in how much bandwidth will be used for interval zero, or the
1998 * rounded max ESIT payload + number of packets * largest overhead.
1999 */
2000 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2001 bw_table->interval_bw[0].num_packets *
2002 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2003
2004 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2005 unsigned int bw_added;
2006 unsigned int largest_mps;
2007 unsigned int interval_overhead;
2008
2009 /*
2010 * How many packets could we transmit in this interval?
2011 * If packets didn't fit in the previous interval, we will need
2012 * to transmit that many packets twice within this interval.
2013 */
2014 packets_remaining = 2 * packets_remaining +
2015 bw_table->interval_bw[i].num_packets;
2016
2017 /* Find the largest max packet size of this or the previous
2018 * interval.
2019 */
2020 if (list_empty(&bw_table->interval_bw[i].endpoints))
2021 largest_mps = 0;
2022 else {
2023 struct xhci_virt_ep *virt_ep;
2024 struct list_head *ep_entry;
2025
2026 ep_entry = bw_table->interval_bw[i].endpoints.next;
2027 virt_ep = list_entry(ep_entry,
2028 struct xhci_virt_ep, bw_endpoint_list);
2029 /* Convert to blocks, rounding up */
2030 largest_mps = DIV_ROUND_UP(
2031 virt_ep->bw_info.max_packet_size,
2032 block_size);
2033 }
2034 if (largest_mps > packet_size)
2035 packet_size = largest_mps;
2036
2037 /* Use the larger overhead of this or the previous interval. */
2038 interval_overhead = xhci_get_largest_overhead(
2039 &bw_table->interval_bw[i]);
2040 if (interval_overhead > overhead)
2041 overhead = interval_overhead;
2042
2043 /* How many packets can we evenly distribute across
2044 * (1 << (i + 1)) possible scheduling opportunities?
2045 */
2046 packets_transmitted = packets_remaining >> (i + 1);
2047
2048 /* Add in the bandwidth used for those scheduled packets */
2049 bw_added = packets_transmitted * (overhead + packet_size);
2050
2051 /* How many packets do we have remaining to transmit? */
2052 packets_remaining = packets_remaining % (1 << (i + 1));
2053
2054 /* What largest max packet size should those packets have? */
2055 /* If we've transmitted all packets, don't carry over the
2056 * largest packet size.
2057 */
2058 if (packets_remaining == 0) {
2059 packet_size = 0;
2060 overhead = 0;
2061 } else if (packets_transmitted > 0) {
2062 /* Otherwise if we do have remaining packets, and we've
2063 * scheduled some packets in this interval, take the
2064 * largest max packet size from endpoints with this
2065 * interval.
2066 */
2067 packet_size = largest_mps;
2068 overhead = interval_overhead;
2069 }
2070 /* Otherwise carry over packet_size and overhead from the last
2071 * time we had a remainder.
2072 */
2073 bw_used += bw_added;
2074 if (bw_used > max_bandwidth) {
2075 xhci_warn(xhci, "Not enough bandwidth. "
2076 "Proposed: %u, Max: %u\n",
2077 bw_used, max_bandwidth);
2078 return -ENOMEM;
2079 }
2080 }
2081 /*
2082 * Ok, we know we have some packets left over after even-handedly
2083 * scheduling interval 15. We don't know which microframes they will
2084 * fit into, so we over-schedule and say they will be scheduled every
2085 * microframe.
2086 */
2087 if (packets_remaining > 0)
2088 bw_used += overhead + packet_size;
2089
2090 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2091 unsigned int port_index = virt_dev->real_port - 1;
2092
2093 /* OK, we're manipulating a HS device attached to a
2094 * root port bandwidth domain. Include the number of active TTs
2095 * in the bandwidth used.
2096 */
2097 bw_used += TT_HS_OVERHEAD *
2098 xhci->rh_bw[port_index].num_active_tts;
2099 }
2100
2101 xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2102 "Available: %u " "percent\n",
2103 bw_used, max_bandwidth, bw_reserved,
2104 (max_bandwidth - bw_used - bw_reserved) * 100 /
2105 max_bandwidth);
2106
2107 bw_used += bw_reserved;
2108 if (bw_used > max_bandwidth) {
2109 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2110 bw_used, max_bandwidth);
2111 return -ENOMEM;
2112 }
2113
2114 bw_table->bw_used = bw_used;
Sarah Sharp2e279802011-09-02 11:05:50 -07002115 return 0;
2116}
2117
2118static bool xhci_is_async_ep(unsigned int ep_type)
2119{
2120 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2121 ep_type != ISOC_IN_EP &&
2122 ep_type != INT_IN_EP);
2123}
2124
Sarah Sharp2b698992011-09-13 16:41:13 -07002125static bool xhci_is_sync_in_ep(unsigned int ep_type)
2126{
 2127	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2128}
2129
2130static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2131{
2132 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2133
2134 if (ep_bw->ep_interval == 0)
2135 return SS_OVERHEAD_BURST +
2136 (ep_bw->mult * ep_bw->num_packets *
2137 (SS_OVERHEAD + mps));
2138 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2139 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2140 1 << ep_bw->ep_interval);
2141
2142}
2143
Sarah Sharp2e279802011-09-02 11:05:50 -07002144void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2145 struct xhci_bw_info *ep_bw,
2146 struct xhci_interval_bw_table *bw_table,
2147 struct usb_device *udev,
2148 struct xhci_virt_ep *virt_ep,
2149 struct xhci_tt_bw_info *tt_info)
2150{
2151 struct xhci_interval_bw *interval_bw;
2152 int normalized_interval;
2153
Sarah Sharp2b698992011-09-13 16:41:13 -07002154 if (xhci_is_async_ep(ep_bw->type))
Sarah Sharp2e279802011-09-02 11:05:50 -07002155 return;
2156
Sarah Sharp2b698992011-09-13 16:41:13 -07002157 if (udev->speed == USB_SPEED_SUPER) {
2158 if (xhci_is_sync_in_ep(ep_bw->type))
2159 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2160 xhci_get_ss_bw_consumed(ep_bw);
2161 else
2162 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2163 xhci_get_ss_bw_consumed(ep_bw);
2164 return;
2165 }
2166
2167 /* SuperSpeed endpoints never get added to intervals in the table, so
2168 * this check is only valid for HS/FS/LS devices.
2169 */
2170 if (list_empty(&virt_ep->bw_endpoint_list))
2171 return;
Sarah Sharp2e279802011-09-02 11:05:50 -07002172 /* For LS/FS devices, we need to translate the interval expressed in
 2173	 * microframes to frames.  A frame is eight microframes, so the
	 * log2-encoded interval value drops by three.
2174 */
2175 if (udev->speed == USB_SPEED_HIGH)
2176 normalized_interval = ep_bw->ep_interval;
2177 else
2178 normalized_interval = ep_bw->ep_interval - 3;
2179
2180 if (normalized_interval == 0)
2181 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2182 interval_bw = &bw_table->interval_bw[normalized_interval];
2183 interval_bw->num_packets -= ep_bw->num_packets;
2184 switch (udev->speed) {
2185 case USB_SPEED_LOW:
2186 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2187 break;
2188 case USB_SPEED_FULL:
2189 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2190 break;
2191 case USB_SPEED_HIGH:
2192 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2193 break;
2194 case USB_SPEED_SUPER:
2195 case USB_SPEED_UNKNOWN:
2196 case USB_SPEED_WIRELESS:
2197 /* Should never happen because only LS/FS/HS endpoints will get
2198 * added to the endpoint list.
2199 */
2200 return;
2201 }
2202 if (tt_info)
2203 tt_info->active_eps -= 1;
2204 list_del_init(&virt_ep->bw_endpoint_list);
2205}
2206
2207static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2208 struct xhci_bw_info *ep_bw,
2209 struct xhci_interval_bw_table *bw_table,
2210 struct usb_device *udev,
2211 struct xhci_virt_ep *virt_ep,
2212 struct xhci_tt_bw_info *tt_info)
2213{
2214 struct xhci_interval_bw *interval_bw;
2215 struct xhci_virt_ep *smaller_ep;
2216 int normalized_interval;
2217
2218 if (xhci_is_async_ep(ep_bw->type))
2219 return;
2220
Sarah Sharp2b698992011-09-13 16:41:13 -07002221 if (udev->speed == USB_SPEED_SUPER) {
2222 if (xhci_is_sync_in_ep(ep_bw->type))
2223 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2224 xhci_get_ss_bw_consumed(ep_bw);
2225 else
2226 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2227 xhci_get_ss_bw_consumed(ep_bw);
2228 return;
2229 }
2230
Sarah Sharp2e279802011-09-02 11:05:50 -07002231 /* For LS/FS devices, we need to translate the interval expressed in
 2232	 * microframes to frames.  A frame is eight microframes, so the
	 * log2-encoded interval value drops by three.
2233 */
2234 if (udev->speed == USB_SPEED_HIGH)
2235 normalized_interval = ep_bw->ep_interval;
2236 else
2237 normalized_interval = ep_bw->ep_interval - 3;
2238
2239 if (normalized_interval == 0)
2240 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2241 interval_bw = &bw_table->interval_bw[normalized_interval];
2242 interval_bw->num_packets += ep_bw->num_packets;
2243 switch (udev->speed) {
2244 case USB_SPEED_LOW:
2245 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2246 break;
2247 case USB_SPEED_FULL:
2248 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2249 break;
2250 case USB_SPEED_HIGH:
2251 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2252 break;
2253 case USB_SPEED_SUPER:
2254 case USB_SPEED_UNKNOWN:
2255 case USB_SPEED_WIRELESS:
2256 /* Should never happen because only LS/FS/HS endpoints will get
2257 * added to the endpoint list.
2258 */
2259 return;
2260 }
2261
2262 if (tt_info)
2263 tt_info->active_eps += 1;
2264 /* Insert the endpoint into the list, largest max packet size first. */
2265 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2266 bw_endpoint_list) {
2267 if (ep_bw->max_packet_size >=
2268 smaller_ep->bw_info.max_packet_size) {
2269 /* Add the new ep before the smaller endpoint */
2270 list_add_tail(&virt_ep->bw_endpoint_list,
2271 &smaller_ep->bw_endpoint_list);
2272 return;
2273 }
2274 }
2275 /* Add the new endpoint at the end of the list. */
2276 list_add_tail(&virt_ep->bw_endpoint_list,
2277 &interval_bw->endpoints);
2278}
2279
2280void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2281 struct xhci_virt_device *virt_dev,
2282 int old_active_eps)
2283{
2284 struct xhci_root_port_bw_info *rh_bw_info;
2285 if (!virt_dev->tt_info)
2286 return;
2287
2288 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2289 if (old_active_eps == 0 &&
2290 virt_dev->tt_info->active_eps != 0) {
2291 rh_bw_info->num_active_tts += 1;
Sarah Sharpc29eea62011-09-02 11:05:52 -07002292 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
Sarah Sharp2e279802011-09-02 11:05:50 -07002293 } else if (old_active_eps != 0 &&
2294 virt_dev->tt_info->active_eps == 0) {
2295 rh_bw_info->num_active_tts -= 1;
Sarah Sharpc29eea62011-09-02 11:05:52 -07002296 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
Sarah Sharp2e279802011-09-02 11:05:50 -07002297 }
2298}
2299
2300static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2301 struct xhci_virt_device *virt_dev,
2302 struct xhci_container_ctx *in_ctx)
2303{
2304 struct xhci_bw_info ep_bw_info[31];
2305 int i;
2306 struct xhci_input_control_ctx *ctrl_ctx;
2307 int old_active_eps = 0;
2308
Sarah Sharp2e279802011-09-02 11:05:50 -07002309 if (virt_dev->tt_info)
2310 old_active_eps = virt_dev->tt_info->active_eps;
2311
2312 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2313
2314 for (i = 0; i < 31; i++) {
2315 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2316 continue;
2317
2318 /* Make a copy of the BW info in case we need to revert this */
2319 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2320 sizeof(ep_bw_info[i]));
2321 /* Drop the endpoint from the interval table if the endpoint is
2322 * being dropped or changed.
2323 */
2324 if (EP_IS_DROPPED(ctrl_ctx, i))
2325 xhci_drop_ep_from_interval_table(xhci,
2326 &virt_dev->eps[i].bw_info,
2327 virt_dev->bw_table,
2328 virt_dev->udev,
2329 &virt_dev->eps[i],
2330 virt_dev->tt_info);
2331 }
2332 /* Overwrite the information stored in the endpoints' bw_info */
2333 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2334 for (i = 0; i < 31; i++) {
2335 /* Add any changed or added endpoints to the interval table */
2336 if (EP_IS_ADDED(ctrl_ctx, i))
2337 xhci_add_ep_to_interval_table(xhci,
2338 &virt_dev->eps[i].bw_info,
2339 virt_dev->bw_table,
2340 virt_dev->udev,
2341 &virt_dev->eps[i],
2342 virt_dev->tt_info);
2343 }
2344
2345 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2346 /* Ok, this fits in the bandwidth we have.
2347 * Update the number of active TTs.
2348 */
2349 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2350 return 0;
2351 }
2352
2353 /* We don't have enough bandwidth for this, revert the stored info. */
2354 for (i = 0; i < 31; i++) {
2355 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2356 continue;
2357
2358 /* Drop the new copies of any added or changed endpoints from
2359 * the interval table.
2360 */
2361 if (EP_IS_ADDED(ctrl_ctx, i)) {
2362 xhci_drop_ep_from_interval_table(xhci,
2363 &virt_dev->eps[i].bw_info,
2364 virt_dev->bw_table,
2365 virt_dev->udev,
2366 &virt_dev->eps[i],
2367 virt_dev->tt_info);
2368 }
2369 /* Revert the endpoint back to its old information */
2370 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2371 sizeof(ep_bw_info[i]));
2372 /* Add any changed or dropped endpoints back into the table */
2373 if (EP_IS_DROPPED(ctrl_ctx, i))
2374 xhci_add_ep_to_interval_table(xhci,
2375 &virt_dev->eps[i].bw_info,
2376 virt_dev->bw_table,
2377 virt_dev->udev,
2378 &virt_dev->eps[i],
2379 virt_dev->tt_info);
2380 }
2381 return -ENOMEM;
2382}
2383
2384
Sarah Sharpf2217e82009-08-07 14:04:43 -07002385/* Issue a configure endpoint command or evaluate context command
2386 * and wait for it to finish.
2387 */
2388static int xhci_configure_endpoint(struct xhci_hcd *xhci,
Sarah Sharp913a8a32009-09-04 10:53:13 -07002389 struct usb_device *udev,
2390 struct xhci_command *command,
2391 bool ctx_change, bool must_succeed)
Sarah Sharpf2217e82009-08-07 14:04:43 -07002392{
2393 int ret;
2394 int timeleft;
2395 unsigned long flags;
Sarah Sharp913a8a32009-09-04 10:53:13 -07002396 struct xhci_container_ctx *in_ctx;
2397 struct completion *cmd_completion;
Matt Evans28ccd292011-03-29 13:40:46 +11002398 u32 *cmd_status;
Sarah Sharp913a8a32009-09-04 10:53:13 -07002399 struct xhci_virt_device *virt_dev;
Sarah Sharpf2217e82009-08-07 14:04:43 -07002400
2401 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharp913a8a32009-09-04 10:53:13 -07002402 virt_dev = xhci->devs[udev->slot_id];
Sarah Sharp2cf95c12011-05-11 16:14:58 -07002403
Sarah Sharp750645f2011-09-02 11:05:43 -07002404 if (command)
2405 in_ctx = command->in_ctx;
2406 else
2407 in_ctx = virt_dev->in_ctx;
2408
2409 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2410 xhci_reserve_host_resources(xhci, in_ctx)) {
2411 spin_unlock_irqrestore(&xhci->lock, flags);
2412 xhci_warn(xhci, "Not enough host resources, "
2413 "active endpoint contexts = %u\n",
2414 xhci->num_active_eps);
2415 return -ENOMEM;
2416 }
Sarah Sharp2e279802011-09-02 11:05:50 -07002417 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2418 xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
2419 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2420 xhci_free_host_resources(xhci, in_ctx);
2421 spin_unlock_irqrestore(&xhci->lock, flags);
2422 xhci_warn(xhci, "Not enough bandwidth\n");
2423 return -ENOMEM;
2424 }
Sarah Sharp750645f2011-09-02 11:05:43 -07002425
2426 if (command) {
Sarah Sharp913a8a32009-09-04 10:53:13 -07002427 cmd_completion = command->completion;
2428 cmd_status = &command->status;
2429 command->command_trb = xhci->cmd_ring->enqueue;
Paul Zimmerman7a3783e2010-11-17 16:26:50 -08002430
2431 /* Enqueue pointer can be left pointing to the link TRB,
 2432		 * so we must handle that.
2433 */
Matt Evansf5960b62011-06-01 10:22:55 +10002434 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
Paul Zimmerman7a3783e2010-11-17 16:26:50 -08002435 command->command_trb =
2436 xhci->cmd_ring->enq_seg->next->trbs;
2437
Sarah Sharp913a8a32009-09-04 10:53:13 -07002438 list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
2439 } else {
Sarah Sharp913a8a32009-09-04 10:53:13 -07002440 cmd_completion = &virt_dev->cmd_completion;
2441 cmd_status = &virt_dev->cmd_status;
2442 }
Andiry Xu1d680642010-03-12 17:10:04 +08002443 init_completion(cmd_completion);
Sarah Sharp913a8a32009-09-04 10:53:13 -07002444
Sarah Sharpf2217e82009-08-07 14:04:43 -07002445 if (!ctx_change)
Sarah Sharp913a8a32009-09-04 10:53:13 -07002446 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2447 udev->slot_id, must_succeed);
Sarah Sharpf2217e82009-08-07 14:04:43 -07002448 else
Sarah Sharp913a8a32009-09-04 10:53:13 -07002449 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
Sarah Sharp4b266542012-05-07 15:34:26 -07002450 udev->slot_id, must_succeed);
Sarah Sharpf2217e82009-08-07 14:04:43 -07002451 if (ret < 0) {
Sarah Sharpc01591b2009-12-09 15:58:58 -08002452 if (command)
2453 list_del(&command->cmd_list);
Sarah Sharp2cf95c12011-05-11 16:14:58 -07002454 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2455 xhci_free_host_resources(xhci, in_ctx);
Sarah Sharpf2217e82009-08-07 14:04:43 -07002456 spin_unlock_irqrestore(&xhci->lock, flags);
2457 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
2458 return -ENOMEM;
2459 }
2460 xhci_ring_cmd_db(xhci);
2461 spin_unlock_irqrestore(&xhci->lock, flags);
2462
2463 /* Wait for the configure endpoint command to complete */
2464 timeleft = wait_for_completion_interruptible_timeout(
Sarah Sharp913a8a32009-09-04 10:53:13 -07002465 cmd_completion,
Sarah Sharpf2217e82009-08-07 14:04:43 -07002466 USB_CTRL_SET_TIMEOUT);
2467 if (timeleft <= 0) {
2468 xhci_warn(xhci, "%s while waiting for %s command\n",
2469 timeleft == 0 ? "Timeout" : "Signal",
2470 ctx_change == 0 ?
2471 "configure endpoint" :
2472 "evaluate context");
2473 /* FIXME cancel the configure endpoint command */
2474 return -ETIME;
2475 }
2476
2477 if (!ctx_change)
Sarah Sharp2cf95c12011-05-11 16:14:58 -07002478 ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
2479 else
2480 ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
2481
2482 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2483 spin_lock_irqsave(&xhci->lock, flags);
2484 /* If the command failed, remove the reserved resources.
2485 * Otherwise, clean up the estimate to include dropped eps.
2486 */
2487 if (ret)
2488 xhci_free_host_resources(xhci, in_ctx);
2489 else
2490 xhci_finish_resource_reservation(xhci, in_ctx);
2491 spin_unlock_irqrestore(&xhci->lock, flags);
2492 }
2493 return ret;
Sarah Sharpf2217e82009-08-07 14:04:43 -07002494}
2495
Sarah Sharpf88ba782009-05-14 11:44:22 -07002496/* Called after one or more calls to xhci_add_endpoint() or
2497 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2498 * to call xhci_reset_bandwidth().
2499 *
2500 * Since we are in the middle of changing either configuration or
2501 * installing a new alt setting, the USB core won't allow URBs to be
2502 * enqueued for any endpoint on the old config or interface. Nothing
2503 * else should be touching the xhci->devs[slot_id] structure, so we
2504 * don't need to take the xhci->lock for manipulating that.
2505 */
Sarah Sharpf94e01862009-04-27 19:58:38 -07002506int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2507{
2508 int i;
2509 int ret = 0;
Sarah Sharpf94e01862009-04-27 19:58:38 -07002510 struct xhci_hcd *xhci;
2511 struct xhci_virt_device *virt_dev;
John Yound115b042009-07-27 12:05:15 -07002512 struct xhci_input_control_ctx *ctrl_ctx;
2513 struct xhci_slot_ctx *slot_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07002514
Andiry Xu64927732010-10-14 07:22:45 -07002515 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002516 if (ret <= 0)
2517 return ret;
2518 xhci = hcd_to_xhci(hcd);
Sarah Sharpfe6c6c12011-05-23 16:41:17 -07002519 if (xhci->xhc_state & XHCI_STATE_DYING)
2520 return -ENODEV;
Sarah Sharpf94e01862009-04-27 19:58:38 -07002521
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002522 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002523 virt_dev = xhci->devs[udev->slot_id];
2524
2525 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
John Yound115b042009-07-27 12:05:15 -07002526 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11002527 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2528 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2529 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
Sarah Sharp2dc37532011-09-02 11:05:40 -07002530
2531 /* Don't issue the command if there's no endpoints to update. */
2532 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2533 ctrl_ctx->drop_flags == 0)
2534 return 0;
2535
Sarah Sharpf94e01862009-04-27 19:58:38 -07002536 xhci_dbg(xhci, "New Input Control Context:\n");
John Yound115b042009-07-27 12:05:15 -07002537 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2538 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
Matt Evans28ccd292011-03-29 13:40:46 +11002539 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
Sarah Sharpf94e01862009-04-27 19:58:38 -07002540
Sarah Sharp913a8a32009-09-04 10:53:13 -07002541 ret = xhci_configure_endpoint(xhci, udev, NULL,
2542 false, false);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002543 if (ret) {
2544 /* Callee should call reset_bandwidth() */
Sarah Sharpf94e01862009-04-27 19:58:38 -07002545 return ret;
2546 }
2547
2548 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
John Yound115b042009-07-27 12:05:15 -07002549 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
Matt Evans28ccd292011-03-29 13:40:46 +11002550 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
Sarah Sharpf94e01862009-04-27 19:58:38 -07002551
Sarah Sharp834cb0f2011-05-12 18:06:37 -07002552 /* Free any rings that were dropped, but not changed. */
2553 for (i = 1; i < 31; ++i) {
Matt Evans4819fef2011-06-01 13:01:07 +10002554 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2555 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
Sarah Sharp834cb0f2011-05-12 18:06:37 -07002556 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2557 }
John Yound115b042009-07-27 12:05:15 -07002558 xhci_zero_in_ctx(xhci, virt_dev);
Sarah Sharp834cb0f2011-05-12 18:06:37 -07002559 /*
2560 * Install any rings for completely new endpoints or changed endpoints,
2561 * and free or cache any old rings from changed endpoints.
2562 */
Sarah Sharpf94e01862009-04-27 19:58:38 -07002563 for (i = 1; i < 31; ++i) {
Sarah Sharp74f9fe22009-12-03 09:44:29 -08002564 if (!virt_dev->eps[i].new_ring)
2565 continue;
2566 /* Only cache or free the old ring if it exists.
2567 * It may not if this is the first add of an endpoint.
2568 */
2569 if (virt_dev->eps[i].ring) {
Sarah Sharp412566b2009-12-09 15:59:01 -08002570 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002571 }
Sarah Sharp74f9fe22009-12-03 09:44:29 -08002572 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2573 virt_dev->eps[i].new_ring = NULL;
Sarah Sharpf94e01862009-04-27 19:58:38 -07002574 }
2575
Sarah Sharpf94e01862009-04-27 19:58:38 -07002576 return ret;
2577}
2578
2579void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2580{
Sarah Sharpf94e01862009-04-27 19:58:38 -07002581 struct xhci_hcd *xhci;
2582 struct xhci_virt_device *virt_dev;
2583 int i, ret;
2584
Andiry Xu64927732010-10-14 07:22:45 -07002585 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002586 if (ret <= 0)
2587 return;
2588 xhci = hcd_to_xhci(hcd);
2589
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002590 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002591 virt_dev = xhci->devs[udev->slot_id];
2592 /* Free any rings allocated for added endpoints */
2593 for (i = 0; i < 31; ++i) {
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002594 if (virt_dev->eps[i].new_ring) {
2595 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2596 virt_dev->eps[i].new_ring = NULL;
Sarah Sharpf94e01862009-04-27 19:58:38 -07002597 }
2598 }
John Yound115b042009-07-27 12:05:15 -07002599 xhci_zero_in_ctx(xhci, virt_dev);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002600}
2601
Sarah Sharp5270b952009-09-04 10:53:11 -07002602static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
Sarah Sharp913a8a32009-09-04 10:53:13 -07002603 struct xhci_container_ctx *in_ctx,
2604 struct xhci_container_ctx *out_ctx,
2605 u32 add_flags, u32 drop_flags)
Sarah Sharp5270b952009-09-04 10:53:11 -07002606{
2607 struct xhci_input_control_ctx *ctrl_ctx;
Sarah Sharp913a8a32009-09-04 10:53:13 -07002608 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11002609 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2610 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
Sarah Sharp913a8a32009-09-04 10:53:13 -07002611 xhci_slot_copy(xhci, in_ctx, out_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11002612 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
Sarah Sharp5270b952009-09-04 10:53:11 -07002613
Sarah Sharp913a8a32009-09-04 10:53:13 -07002614 xhci_dbg(xhci, "Input Context:\n");
2615 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
Sarah Sharp5270b952009-09-04 10:53:11 -07002616}
2617
Dmitry Torokhov8212a492011-02-08 13:55:59 -08002618static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002619 unsigned int slot_id, unsigned int ep_index,
2620 struct xhci_dequeue_state *deq_state)
2621{
2622 struct xhci_container_ctx *in_ctx;
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002623 struct xhci_ep_ctx *ep_ctx;
2624 u32 added_ctxs;
2625 dma_addr_t addr;
2626
Sarah Sharp913a8a32009-09-04 10:53:13 -07002627 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2628 xhci->devs[slot_id]->out_ctx, ep_index);
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002629 in_ctx = xhci->devs[slot_id]->in_ctx;
2630 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2631 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2632 deq_state->new_deq_ptr);
2633 if (addr == 0) {
2634 xhci_warn(xhci, "WARN Cannot submit config ep after "
2635 "reset ep command\n");
2636 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2637 deq_state->new_deq_seg,
2638 deq_state->new_deq_ptr);
2639 return;
2640 }
Matt Evans28ccd292011-03-29 13:40:46 +11002641 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002642
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002643 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
Sarah Sharp913a8a32009-09-04 10:53:13 -07002644 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2645 xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002646}
2647
Sarah Sharp82d10092009-08-07 14:04:52 -07002648void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002649 struct usb_device *udev, unsigned int ep_index)
Sarah Sharp82d10092009-08-07 14:04:52 -07002650{
2651 struct xhci_dequeue_state deq_state;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002652 struct xhci_virt_ep *ep;
Sarah Sharp82d10092009-08-07 14:04:52 -07002653
2654 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002655 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
Sarah Sharp82d10092009-08-07 14:04:52 -07002656 /* We need to move the HW's dequeue pointer past this TD,
2657 * or it will attempt to resend it on the next doorbell ring.
2658 */
2659 xhci_find_new_dequeue_state(xhci, udev->slot_id,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002660 ep_index, ep->stopped_stream, ep->stopped_td,
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002661 &deq_state);
Sarah Sharp82d10092009-08-07 14:04:52 -07002662
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002663 /* HW with the reset endpoint quirk will use the saved dequeue state to
2664 * issue a configure endpoint command later.
2665 */
2666 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2667 xhci_dbg(xhci, "Queueing new dequeue state\n");
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002668 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002669 ep_index, ep->stopped_stream, &deq_state);
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002670 } else {
2671 /* Better hope no one uses the input context between now and the
2672 * reset endpoint completion!
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002673 * XXX: No idea how this hardware will react when stream rings
2674 * are enabled.
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002675 */
2676 xhci_dbg(xhci, "Setting up input context for "
2677 "configure endpoint command\n");
2678 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2679 ep_index, &deq_state);
2680 }
Sarah Sharp82d10092009-08-07 14:04:52 -07002681}
2682
Sarah Sharpa1587d92009-07-27 12:03:15 -07002683/* Deal with stalled endpoints. The core should have sent the control message
2684 * to clear the halt condition. However, we need to make the xHCI hardware
2685 * reset its sequence number, since a device will expect a sequence number of
2686 * zero after the halt condition is cleared.
2687 * Context: in_interrupt
2688 */
2689void xhci_endpoint_reset(struct usb_hcd *hcd,
2690 struct usb_host_endpoint *ep)
2691{
2692 struct xhci_hcd *xhci;
2693 struct usb_device *udev;
2694 unsigned int ep_index;
2695 unsigned long flags;
2696 int ret;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002697 struct xhci_virt_ep *virt_ep;
Sarah Sharpa1587d92009-07-27 12:03:15 -07002698
2699 xhci = hcd_to_xhci(hcd);
2700 udev = (struct usb_device *) ep->hcpriv;
2701 /* Called with a root hub endpoint (or an endpoint that wasn't added
 2702 * with xhci_add_endpoint()).
2703 */
2704 if (!ep->hcpriv)
2705 return;
2706 ep_index = xhci_get_endpoint_index(&ep->desc);
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002707 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2708 if (!virt_ep->stopped_td) {
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07002709 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
2710 ep->desc.bEndpointAddress);
2711 return;
2712 }
Sarah Sharp82d10092009-08-07 14:04:52 -07002713 if (usb_endpoint_xfer_control(&ep->desc)) {
2714 xhci_dbg(xhci, "Control endpoint stall already handled.\n");
2715 return;
2716 }
Sarah Sharpa1587d92009-07-27 12:03:15 -07002717
2718 xhci_dbg(xhci, "Queueing reset endpoint command\n");
2719 spin_lock_irqsave(&xhci->lock, flags);
2720 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07002721 /*
2722 * Can't change the ring dequeue pointer until it's transitioned to the
2723 * stopped state, which is only upon a successful reset endpoint
2724 * command. Better hope that last command worked!
2725 */
Sarah Sharpa1587d92009-07-27 12:03:15 -07002726 if (!ret) {
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002727 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
2728 kfree(virt_ep->stopped_td);
Sarah Sharpa1587d92009-07-27 12:03:15 -07002729 xhci_ring_cmd_db(xhci);
2730 }
Sarah Sharp1624ae12010-05-06 13:40:08 -07002731 virt_ep->stopped_td = NULL;
2732 virt_ep->stopped_trb = NULL;
Sarah Sharp5e5cf6f2010-05-06 13:40:18 -07002733 virt_ep->stopped_stream = 0;
Sarah Sharpa1587d92009-07-27 12:03:15 -07002734 spin_unlock_irqrestore(&xhci->lock, flags);
2735
2736 if (ret)
2737 xhci_warn(xhci, "FIXME allocate a new ring segment\n");
2738}
2739
Sarah Sharp8df75f42010-04-02 15:34:16 -07002740static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2741 struct usb_device *udev, struct usb_host_endpoint *ep,
2742 unsigned int slot_id)
2743{
2744 int ret;
2745 unsigned int ep_index;
2746 unsigned int ep_state;
2747
2748 if (!ep)
2749 return -EINVAL;
Andiry Xu64927732010-10-14 07:22:45 -07002750 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
Sarah Sharp8df75f42010-04-02 15:34:16 -07002751 if (ret <= 0)
2752 return -EINVAL;
Alan Stern842f1692010-04-30 12:44:46 -04002753 if (ep->ss_ep_comp.bmAttributes == 0) {
Sarah Sharp8df75f42010-04-02 15:34:16 -07002754 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2755 " descriptor for ep 0x%x does not support streams\n",
2756 ep->desc.bEndpointAddress);
2757 return -EINVAL;
2758 }
2759
2760 ep_index = xhci_get_endpoint_index(&ep->desc);
2761 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2762 if (ep_state & EP_HAS_STREAMS ||
2763 ep_state & EP_GETTING_STREAMS) {
2764 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
2765 "already has streams set up.\n",
2766 ep->desc.bEndpointAddress);
2767 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
2768 "dynamic stream context array reallocation.\n");
2769 return -EINVAL;
2770 }
2771 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
2772 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
2773 "endpoint 0x%x; URBs are pending.\n",
2774 ep->desc.bEndpointAddress);
2775 return -EINVAL;
2776 }
2777 return 0;
2778}
2779
2780static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
2781 unsigned int *num_streams, unsigned int *num_stream_ctxs)
2782{
2783 unsigned int max_streams;
2784
2785 /* The stream context array size must be a power of two */
2786 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
2787 /*
2788 * Find out how many primary stream array entries the host controller
2789 * supports. Later we may use secondary stream arrays (similar to 2nd
2790 * level page entries), but that's an optional feature for xHCI host
2791 * controllers. xHCs must support at least 4 stream IDs.
2792 */
2793 max_streams = HCC_MAX_PSA(xhci->hcc_params);
2794 if (*num_stream_ctxs > max_streams) {
2795 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
2796 max_streams);
2797 *num_stream_ctxs = max_streams;
2798 *num_streams = max_streams;
2799 }
2800}
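
/*
 * Worked example (illustrative): a driver asking for 6 stream IDs needs a
 * stream context array of roundup_pow_of_two(6) = 8 entries.  If this
 * host's HCC_MAX_PSA only allows 4 entries, both *num_stream_ctxs and
 * *num_streams are clamped to 4 here.
 */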
2801
 2802/* Returns an error code if one of the endpoints already has streams.
2803 * This does not change any data structures, it only checks and gathers
2804 * information.
2805 */
2806static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
2807 struct usb_device *udev,
2808 struct usb_host_endpoint **eps, unsigned int num_eps,
2809 unsigned int *num_streams, u32 *changed_ep_bitmask)
2810{
Sarah Sharp8df75f42010-04-02 15:34:16 -07002811 unsigned int max_streams;
2812 unsigned int endpoint_flag;
2813 int i;
2814 int ret;
2815
2816 for (i = 0; i < num_eps; i++) {
2817 ret = xhci_check_streams_endpoint(xhci, udev,
2818 eps[i], udev->slot_id);
2819 if (ret < 0)
2820 return ret;
2821
Felipe Balbi18b7ede2012-01-02 13:35:41 +02002822 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
Sarah Sharp8df75f42010-04-02 15:34:16 -07002823 if (max_streams < (*num_streams - 1)) {
2824 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
2825 eps[i]->desc.bEndpointAddress,
2826 max_streams);
2827 *num_streams = max_streams+1;
2828 }
2829
2830 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
2831 if (*changed_ep_bitmask & endpoint_flag)
2832 return -EINVAL;
2833 *changed_ep_bitmask |= endpoint_flag;
2834 }
2835 return 0;
2836}
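
/*
 * Worked example (illustrative): if the caller asked for 16 stream IDs
 * (so *num_streams arrives here as 17, counting stream 0) and one
 * endpoint's companion descriptor only supports 8, *num_streams is
 * clamped to 8 + 1 = 9, so all endpoints end up sized for the smallest
 * supported stream count.
 */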
2837
2838static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
2839 struct usb_device *udev,
2840 struct usb_host_endpoint **eps, unsigned int num_eps)
2841{
2842 u32 changed_ep_bitmask = 0;
2843 unsigned int slot_id;
2844 unsigned int ep_index;
2845 unsigned int ep_state;
2846 int i;
2847
2848 slot_id = udev->slot_id;
2849 if (!xhci->devs[slot_id])
2850 return 0;
2851
2852 for (i = 0; i < num_eps; i++) {
2853 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2854 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2855 /* Are streams already being freed for the endpoint? */
2856 if (ep_state & EP_GETTING_NO_STREAMS) {
2857 xhci_warn(xhci, "WARN Can't disable streams for "
2858 "endpoint 0x%x\n, "
2859 "streams are being disabled already.",
2860 eps[i]->desc.bEndpointAddress);
2861 return 0;
2862 }
2863 /* Are there actually any streams to free? */
2864 if (!(ep_state & EP_HAS_STREAMS) &&
2865 !(ep_state & EP_GETTING_STREAMS)) {
2866 xhci_warn(xhci, "WARN Can't disable streams for "
2867 "endpoint 0x%x\n, "
2868 "streams are already disabled!",
2869 eps[i]->desc.bEndpointAddress);
2870 xhci_warn(xhci, "WARN xhci_free_streams() called "
2871 "with non-streams endpoint\n");
2872 return 0;
2873 }
2874 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
2875 }
2876 return changed_ep_bitmask;
2877}
2878
2879/*
 2880 * The USB device drivers use this function (through the HCD interface in USB
2881 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
2882 * coordinate mass storage command queueing across multiple endpoints (basically
2883 * a stream ID == a task ID).
2884 *
2885 * Setting up streams involves allocating the same size stream context array
2886 * for each endpoint and issuing a configure endpoint command for all endpoints.
2887 *
 2888 * Don't allow the call to succeed if any endpoint supports only one stream
2889 * (which means it doesn't support streams at all).
2890 *
2891 * Drivers may get less stream IDs than they asked for, if the host controller
2892 * hardware or endpoints claim they can't support the number of requested
2893 * stream IDs.
2894 */
2895int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
2896 struct usb_host_endpoint **eps, unsigned int num_eps,
2897 unsigned int num_streams, gfp_t mem_flags)
2898{
2899 int i, ret;
2900 struct xhci_hcd *xhci;
2901 struct xhci_virt_device *vdev;
2902 struct xhci_command *config_cmd;
2903 unsigned int ep_index;
2904 unsigned int num_stream_ctxs;
2905 unsigned long flags;
2906 u32 changed_ep_bitmask = 0;
2907
2908 if (!eps)
2909 return -EINVAL;
2910
2911 /* Add one to the number of streams requested to account for
2912 * stream 0 that is reserved for xHCI usage.
2913 */
2914 num_streams += 1;
2915 xhci = hcd_to_xhci(hcd);
2916 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
2917 num_streams);
2918
2919 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
2920 if (!config_cmd) {
2921 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
2922 return -ENOMEM;
2923 }
2924
2925 /* Check to make sure all endpoints are not already configured for
2926 * streams. While we're at it, find the maximum number of streams that
2927 * all the endpoints will support and check for duplicate endpoints.
2928 */
2929 spin_lock_irqsave(&xhci->lock, flags);
2930 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
2931 num_eps, &num_streams, &changed_ep_bitmask);
2932 if (ret < 0) {
2933 xhci_free_command(xhci, config_cmd);
2934 spin_unlock_irqrestore(&xhci->lock, flags);
2935 return ret;
2936 }
2937 if (num_streams <= 1) {
2938 xhci_warn(xhci, "WARN: endpoints can't handle "
2939 "more than one stream.\n");
2940 xhci_free_command(xhci, config_cmd);
2941 spin_unlock_irqrestore(&xhci->lock, flags);
2942 return -EINVAL;
2943 }
2944 vdev = xhci->devs[udev->slot_id];
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002945 /* Mark each endpoint as being in transition, so
Sarah Sharp8df75f42010-04-02 15:34:16 -07002946 * xhci_urb_enqueue() will reject all URBs.
2947 */
2948 for (i = 0; i < num_eps; i++) {
2949 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2950 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
2951 }
2952 spin_unlock_irqrestore(&xhci->lock, flags);
2953
2954 /* Setup internal data structures and allocate HW data structures for
2955 * streams (but don't install the HW structures in the input context
2956 * until we're sure all memory allocation succeeded).
2957 */
2958 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
2959 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
2960 num_stream_ctxs, num_streams);
2961
2962 for (i = 0; i < num_eps; i++) {
2963 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2964 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
2965 num_stream_ctxs,
2966 num_streams, mem_flags);
2967 if (!vdev->eps[ep_index].stream_info)
2968 goto cleanup;
2969 /* Set maxPstreams in endpoint context and update deq ptr to
2970 * point to stream context array. FIXME
2971 */
2972 }
2973
2974 /* Set up the input context for a configure endpoint command. */
2975 for (i = 0; i < num_eps; i++) {
2976 struct xhci_ep_ctx *ep_ctx;
2977
2978 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2979 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
2980
2981 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
2982 vdev->out_ctx, ep_index);
2983 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
2984 vdev->eps[ep_index].stream_info);
2985 }
2986 /* Tell the HW to drop its old copy of the endpoint context info
2987 * and add the updated copy from the input context.
2988 */
2989 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
2990 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
2991
2992 /* Issue and wait for the configure endpoint command */
2993 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
2994 false, false);
2995
2996 /* xHC rejected the configure endpoint command for some reason, so we
2997 * leave the old ring intact and free our internal streams data
2998 * structure.
2999 */
3000 if (ret < 0)
3001 goto cleanup;
3002
3003 spin_lock_irqsave(&xhci->lock, flags);
3004 for (i = 0; i < num_eps; i++) {
3005 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3006 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3007 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3008 udev->slot_id, ep_index);
3009 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3010 }
3011 xhci_free_command(xhci, config_cmd);
3012 spin_unlock_irqrestore(&xhci->lock, flags);
3013
3014 /* Subtract 1 for stream 0, which drivers can't use */
3015 return num_streams - 1;
3016
3017cleanup:
3018 /* If it didn't work, free the streams! */
3019 for (i = 0; i < num_eps; i++) {
3020 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3021 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
Sarah Sharp8a007742010-04-30 15:37:56 -07003022 vdev->eps[ep_index].stream_info = NULL;
Sarah Sharp8df75f42010-04-02 15:34:16 -07003023 /* FIXME Unset maxPstreams in endpoint context and
 3024 * update deq ptr to point to the normal endpoint ring.
3025 */
3026 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3027 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3028 xhci_endpoint_zero(xhci, vdev, eps[i]);
3029 }
3030 xhci_free_command(xhci, config_cmd);
3031 return -ENOMEM;
3032}
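
/*
 * Usage sketch (illustrative, assuming a class driver with a set of
 * SuperSpeed bulk endpoints gathered in eps[]): drivers normally reach
 * this through the USB core rather than calling it directly, e.g.:
 *
 *	ret = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *	if (ret < 0)
 *		... fall back to ordinary (non-stream) transfers ...
 *
 * On success, ret is the usable stream count, and stream IDs 1..ret may
 * be assigned to URBs queued on those endpoints.
 */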
3033
3034/* Transition the endpoint from using streams to being a "normal" endpoint
3035 * without streams.
3036 *
3037 * Modify the endpoint context state, submit a configure endpoint command,
3038 * and free all endpoint rings for streams if that completes successfully.
3039 */
3040int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3041 struct usb_host_endpoint **eps, unsigned int num_eps,
3042 gfp_t mem_flags)
3043{
3044 int i, ret;
3045 struct xhci_hcd *xhci;
3046 struct xhci_virt_device *vdev;
3047 struct xhci_command *command;
3048 unsigned int ep_index;
3049 unsigned long flags;
3050 u32 changed_ep_bitmask;
3051
3052 xhci = hcd_to_xhci(hcd);
3053 vdev = xhci->devs[udev->slot_id];
3054
3055 /* Set up a configure endpoint command to remove the streams rings */
3056 spin_lock_irqsave(&xhci->lock, flags);
3057 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3058 udev, eps, num_eps);
3059 if (changed_ep_bitmask == 0) {
3060 spin_unlock_irqrestore(&xhci->lock, flags);
3061 return -EINVAL;
3062 }
3063
3064 /* Use the xhci_command structure from the first endpoint. We may have
3065 * allocated too many, but the driver may call xhci_free_streams() for
3066 * each endpoint it grouped into one call to xhci_alloc_streams().
3067 */
3068 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3069 command = vdev->eps[ep_index].stream_info->free_streams_command;
3070 for (i = 0; i < num_eps; i++) {
3071 struct xhci_ep_ctx *ep_ctx;
3072
3073 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3074 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3075 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3076 EP_GETTING_NO_STREAMS;
3077
3078 xhci_endpoint_copy(xhci, command->in_ctx,
3079 vdev->out_ctx, ep_index);
3080 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
3081 &vdev->eps[ep_index]);
3082 }
3083 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3084 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3085 spin_unlock_irqrestore(&xhci->lock, flags);
3086
3087 /* Issue and wait for the configure endpoint command,
3088 * which must succeed.
3089 */
3090 ret = xhci_configure_endpoint(xhci, udev, command,
3091 false, true);
3092
3093 /* xHC rejected the configure endpoint command for some reason, so we
3094 * leave the streams rings intact.
3095 */
3096 if (ret < 0)
3097 return ret;
3098
3099 spin_lock_irqsave(&xhci->lock, flags);
3100 for (i = 0; i < num_eps; i++) {
3101 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3102 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
Sarah Sharp8a007742010-04-30 15:37:56 -07003103 vdev->eps[ep_index].stream_info = NULL;
Sarah Sharp8df75f42010-04-02 15:34:16 -07003104 /* FIXME Unset maxPstreams in endpoint context and
 3105 * update deq ptr to point to the normal endpoint ring.
3106 */
3107 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3108 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3109 }
3110 spin_unlock_irqrestore(&xhci->lock, flags);
3111
3112 return 0;
3113}
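
/*
 * Counterpart sketch (illustrative): a driver undoes the allocation above
 * through the USB core with
 *
 *	usb_free_streams(intf, eps, num_eps, GFP_KERNEL);
 *
 * passing the same endpoint group it handed to usb_alloc_streams().
 */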
3114
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003115/*
Sarah Sharp2cf95c12011-05-11 16:14:58 -07003116 * Deletes endpoint resources for endpoints that were active before a Reset
3117 * Device command, or a Disable Slot command. The Reset Device command leaves
3118 * the control endpoint intact, whereas the Disable Slot command deletes it.
3119 *
3120 * Must be called with xhci->lock held.
3121 */
3122void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3123 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3124{
3125 int i;
3126 unsigned int num_dropped_eps = 0;
3127 unsigned int drop_flags = 0;
3128
3129 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3130 if (virt_dev->eps[i].ring) {
3131 drop_flags |= 1 << i;
3132 num_dropped_eps++;
3133 }
3134 }
3135 xhci->num_active_eps -= num_dropped_eps;
3136 if (num_dropped_eps)
3137 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
3138 "%u now active.\n",
3139 num_dropped_eps, drop_flags,
3140 xhci->num_active_eps);
3141}
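
/*
 * For example (illustrative): a device that had rings on endpoint
 * indices 2 and 4 gives drop_flags = (1 << 2) | (1 << 4) = 0x14 and
 * returns two endpoint contexts to the host's shared pool.
 */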
3142
3143/*
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003144 * This submits a Reset Device Command, which will set the device state to 0,
3145 * set the device address to 0, and disable all the endpoints except the default
3146 * control endpoint. The USB core should come back and call
3147 * xhci_address_device(), and then re-set up the configuration. If this is
3148 * called because of a usb_reset_and_verify_device(), then the old alternate
3149 * settings will be re-installed through the normal bandwidth allocation
3150 * functions.
3151 *
3152 * Wait for the Reset Device command to finish. Remove all structures
3153 * associated with the endpoints that were disabled. Clear the input device
3154 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
Andiry Xuf0615c42010-10-14 07:22:48 -07003155 *
3156 * If the virt_dev to be reset does not exist or does not match the udev,
3157 * it means the device is lost, possibly due to the xHC restore error and
3158 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3159 * re-allocate the device.
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003160 */
Andiry Xuf0615c42010-10-14 07:22:48 -07003161int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003162{
3163 int ret, i;
3164 unsigned long flags;
3165 struct xhci_hcd *xhci;
3166 unsigned int slot_id;
3167 struct xhci_virt_device *virt_dev;
3168 struct xhci_command *reset_device_cmd;
3169 int timeleft;
3170 int last_freed_endpoint;
Maarten Lankhorst001fd382011-06-01 23:27:50 +02003171 struct xhci_slot_ctx *slot_ctx;
Sarah Sharp2e279802011-09-02 11:05:50 -07003172 int old_active_eps = 0;
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003173
Andiry Xuf0615c42010-10-14 07:22:48 -07003174 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003175 if (ret <= 0)
3176 return ret;
3177 xhci = hcd_to_xhci(hcd);
3178 slot_id = udev->slot_id;
3179 virt_dev = xhci->devs[slot_id];
Andiry Xuf0615c42010-10-14 07:22:48 -07003180 if (!virt_dev) {
3181 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3182 "not exist. Re-allocate the device\n", slot_id);
3183 ret = xhci_alloc_dev(hcd, udev);
3184 if (ret == 1)
3185 return 0;
3186 else
3187 return -EINVAL;
3188 }
3189
3190 if (virt_dev->udev != udev) {
 3191 /* If the virt_dev and the udev do not match, this virt_dev
3192 * may belong to another udev.
3193 * Re-allocate the device.
3194 */
3195 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3196 "not match the udev. Re-allocate the device\n",
3197 slot_id);
3198 ret = xhci_alloc_dev(hcd, udev);
3199 if (ret == 1)
3200 return 0;
3201 else
3202 return -EINVAL;
3203 }
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003204
Maarten Lankhorst001fd382011-06-01 23:27:50 +02003205 /* If device is not setup, there is no point in resetting it */
3206 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3207 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3208 SLOT_STATE_DISABLED)
3209 return 0;
3210
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003211 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3212 /* Allocate the command structure that holds the struct completion.
3213 * Assume we're in process context, since the normal device reset
3214 * process has to wait for the device anyway. Storage devices are
3215 * reset as part of error handling, so use GFP_NOIO instead of
3216 * GFP_KERNEL.
3217 */
3218 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3219 if (!reset_device_cmd) {
3220 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3221 return -ENOMEM;
3222 }
3223
3224 /* Attempt to submit the Reset Device command to the command ring */
3225 spin_lock_irqsave(&xhci->lock, flags);
3226 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
Paul Zimmerman7a3783e2010-11-17 16:26:50 -08003227
3228 /* Enqueue pointer can be left pointing to the link TRB,
3229 * we must handle that
3230 */
Matt Evansf5960b62011-06-01 10:22:55 +10003231 if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
Paul Zimmerman7a3783e2010-11-17 16:26:50 -08003232 reset_device_cmd->command_trb =
3233 xhci->cmd_ring->enq_seg->next->trbs;
3234
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003235 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3236 ret = xhci_queue_reset_device(xhci, slot_id);
3237 if (ret) {
3238 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3239 list_del(&reset_device_cmd->cmd_list);
3240 spin_unlock_irqrestore(&xhci->lock, flags);
3241 goto command_cleanup;
3242 }
3243 xhci_ring_cmd_db(xhci);
3244 spin_unlock_irqrestore(&xhci->lock, flags);
3245
3246 /* Wait for the Reset Device command to finish */
3247 timeleft = wait_for_completion_interruptible_timeout(
3248 reset_device_cmd->completion,
3249 USB_CTRL_SET_TIMEOUT);
3250 if (timeleft <= 0) {
3251 xhci_warn(xhci, "%s while waiting for reset device command\n",
3252 timeleft == 0 ? "Timeout" : "Signal");
3253 spin_lock_irqsave(&xhci->lock, flags);
3254 /* The timeout might have raced with the event ring handler, so
3255 * only delete from the list if the item isn't poisoned.
3256 */
3257 if (reset_device_cmd->cmd_list.next != LIST_POISON1)
3258 list_del(&reset_device_cmd->cmd_list);
3259 spin_unlock_irqrestore(&xhci->lock, flags);
3260 ret = -ETIME;
3261 goto command_cleanup;
3262 }
3263
3264 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3265 * unless we tried to reset a slot ID that wasn't enabled,
3266 * or the device wasn't in the addressed or configured state.
3267 */
3268 ret = reset_device_cmd->status;
3269 switch (ret) {
3270 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
3271 case COMP_CTX_STATE: /* 0.96 completion code for same thing */
3272 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
3273 slot_id,
3274 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3275 xhci_info(xhci, "Not freeing device rings.\n");
3276 /* Don't treat this as an error. May change my mind later. */
3277 ret = 0;
3278 goto command_cleanup;
3279 case COMP_SUCCESS:
3280 xhci_dbg(xhci, "Successful reset device command.\n");
3281 break;
3282 default:
3283 if (xhci_is_vendor_info_code(xhci, ret))
3284 break;
3285 xhci_warn(xhci, "Unknown completion code %u for "
3286 "reset device command.\n", ret);
3287 ret = -EINVAL;
3288 goto command_cleanup;
3289 }
3290
Sarah Sharp2cf95c12011-05-11 16:14:58 -07003291 /* Free up host controller endpoint resources */
3292 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3293 spin_lock_irqsave(&xhci->lock, flags);
3294 /* Don't delete the default control endpoint resources */
3295 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3296 spin_unlock_irqrestore(&xhci->lock, flags);
3297 }
3298
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003299 /* Everything but endpoint 0 is disabled, so free or cache the rings. */
3300 last_freed_endpoint = 1;
3301 for (i = 1; i < 31; ++i) {
Dmitry Torokhov2dea75d2011-04-12 23:06:28 -07003302 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3303
3304 if (ep->ep_state & EP_HAS_STREAMS) {
3305 xhci_free_stream_info(xhci, ep->stream_info);
3306 ep->stream_info = NULL;
3307 ep->ep_state &= ~EP_HAS_STREAMS;
3308 }
3309
3310 if (ep->ring) {
3311 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3312 last_freed_endpoint = i;
3313 }
Sarah Sharp2e279802011-09-02 11:05:50 -07003314 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3315 xhci_drop_ep_from_interval_table(xhci,
3316 &virt_dev->eps[i].bw_info,
3317 virt_dev->bw_table,
3318 udev,
3319 &virt_dev->eps[i],
3320 virt_dev->tt_info);
Sarah Sharp9af5d712011-09-02 11:05:48 -07003321 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003322 }
Sarah Sharp2e279802011-09-02 11:05:50 -07003323 /* If necessary, update the number of active TTs on this root port */
3324 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3325
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003326 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3327 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3328 ret = 0;
3329
3330command_cleanup:
3331 xhci_free_command(xhci, reset_device_cmd);
3332 return ret;
3333}
3334
3335/*
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003336 * At this point, the struct usb_device is about to go away, the device has
3337 * disconnected, and all traffic has been stopped and the endpoints have been
3338 * disabled. Free any HC data structures associated with that device.
3339 */
3340void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3341{
3342 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
Sarah Sharp6f5165c2009-10-27 10:57:01 -07003343 struct xhci_virt_device *virt_dev;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003344 unsigned long flags;
Sarah Sharpc526d0d2009-09-16 16:42:39 -07003345 u32 state;
Andiry Xu64927732010-10-14 07:22:45 -07003346 int i, ret;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003347
Andiry Xu64927732010-10-14 07:22:45 -07003348 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
Sarah Sharp7bd89b42011-07-01 13:35:40 -07003349 /* If the host is halted due to driver unload, we still need to free the
3350 * device.
3351 */
3352 if (ret <= 0 && ret != -ENODEV)
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003353 return;
Andiry Xu64927732010-10-14 07:22:45 -07003354
Sarah Sharp6f5165c2009-10-27 10:57:01 -07003355 virt_dev = xhci->devs[udev->slot_id];
Sarah Sharp6f5165c2009-10-27 10:57:01 -07003356
3357 /* Stop any wayward timer functions (which may grab the lock) */
3358 for (i = 0; i < 31; ++i) {
3359 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3360 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3361 }
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003362
Andiry Xu65580b432011-09-23 14:19:52 -07003363 if (udev->usb2_hw_lpm_enabled) {
3364 xhci_set_usb2_hardware_lpm(hcd, udev, 0);
3365 udev->usb2_hw_lpm_enabled = 0;
3366 }
3367
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003368 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharpc526d0d2009-09-16 16:42:39 -07003369 /* Don't disable the slot if the host controller is dead. */
3370 state = xhci_readl(xhci, &xhci->op_regs->status);
Sarah Sharp7bd89b42011-07-01 13:35:40 -07003371 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3372 (xhci->xhc_state & XHCI_STATE_HALTED)) {
Sarah Sharpc526d0d2009-09-16 16:42:39 -07003373 xhci_free_virt_device(xhci, udev->slot_id);
3374 spin_unlock_irqrestore(&xhci->lock, flags);
3375 return;
3376 }
3377
Sarah Sharp23e3be12009-04-29 19:05:20 -07003378 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003379 spin_unlock_irqrestore(&xhci->lock, flags);
3380 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3381 return;
3382 }
Sarah Sharp23e3be12009-04-29 19:05:20 -07003383 xhci_ring_cmd_db(xhci);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003384 spin_unlock_irqrestore(&xhci->lock, flags);
3385 /*
3386 * Event command completion handler will free any data structures
Sarah Sharpf88ba782009-05-14 11:44:22 -07003387 * associated with the slot. XXX Can free sleep?
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003388 */
3389}
3390
3391/*
Sarah Sharp2cf95c12011-05-11 16:14:58 -07003392 * Checks if we have enough host controller resources for the default control
3393 * endpoint.
3394 *
3395 * Must be called with xhci->lock held.
3396 */
3397static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3398{
3399 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3400 xhci_dbg(xhci, "Not enough ep ctxs: "
3401 "%u active, need to add 1, limit is %u.\n",
3402 xhci->num_active_eps, xhci->limit_active_eps);
3403 return -ENOMEM;
3404 }
3405 xhci->num_active_eps += 1;
3406 xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
3407 xhci->num_active_eps);
3408 return 0;
3409}
3410
3411
3412/*
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003413 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3414 * timed out, or allocating memory failed. Returns 1 on success.
3415 */
3416int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3417{
3418 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3419 unsigned long flags;
3420 int timeleft;
3421 int ret;
3422
3423 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharp23e3be12009-04-29 19:05:20 -07003424 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003425 if (ret) {
3426 spin_unlock_irqrestore(&xhci->lock, flags);
3427 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3428 return 0;
3429 }
Sarah Sharp23e3be12009-04-29 19:05:20 -07003430 xhci_ring_cmd_db(xhci);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003431 spin_unlock_irqrestore(&xhci->lock, flags);
3432
3433 /* XXX: how much time for xHC slot assignment? */
3434 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3435 USB_CTRL_SET_TIMEOUT);
3436 if (timeleft <= 0) {
3437 xhci_warn(xhci, "%s while waiting for a slot\n",
3438 timeleft == 0 ? "Timeout" : "Signal");
3439 /* FIXME cancel the enable slot request */
3440 return 0;
3441 }
3442
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003443 if (!xhci->slot_id) {
3444 xhci_err(xhci, "Error while assigning device slot ID\n");
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003445 return 0;
3446 }
Sarah Sharp2cf95c12011-05-11 16:14:58 -07003447
3448 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3449 spin_lock_irqsave(&xhci->lock, flags);
3450 ret = xhci_reserve_host_control_ep_resources(xhci);
3451 if (ret) {
3452 spin_unlock_irqrestore(&xhci->lock, flags);
3453 xhci_warn(xhci, "Not enough host resources, "
3454 "active endpoint contexts = %u\n",
3455 xhci->num_active_eps);
3456 goto disable_slot;
3457 }
3458 spin_unlock_irqrestore(&xhci->lock, flags);
3459 }
3460 /* Use GFP_NOIO, since this function can be called from
Sarah Sharpa6d940d2010-12-28 13:08:42 -08003461 * xhci_discover_or_reset_device(), which may be called as part of
3462 * mass storage driver error handling.
3463 */
3464 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003465 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
Sarah Sharp2cf95c12011-05-11 16:14:58 -07003466 goto disable_slot;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003467 }
3468 udev->slot_id = xhci->slot_id;
3469 /* Is this a LS or FS device under a HS hub? */
 3470 /* Hub or peripheral? */
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003471 return 1;
Sarah Sharp2cf95c12011-05-11 16:14:58 -07003472
3473disable_slot:
3474 /* Disable slot, if we can do it without mem alloc */
3475 spin_lock_irqsave(&xhci->lock, flags);
3476 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
3477 xhci_ring_cmd_db(xhci);
3478 spin_unlock_irqrestore(&xhci->lock, flags);
3479 return 0;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003480}
3481
3482/*
3483 * Issue an Address Device command (which will issue a SetAddress request to
3484 * the device).
3485 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
3486 * we should only issue and wait on one address command at the same time.
3487 *
3488 * We add one to the device address issued by the hardware because the USB core
3489 * uses address 1 for the root hubs (even though they're not really devices).
3490 */
3491int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3492{
3493 unsigned long flags;
3494 int timeleft;
3495 struct xhci_virt_device *virt_dev;
3496 int ret = 0;
3497 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
John Yound115b042009-07-27 12:05:15 -07003498 struct xhci_slot_ctx *slot_ctx;
3499 struct xhci_input_control_ctx *ctrl_ctx;
Sarah Sharp8e595a52009-07-27 12:03:31 -07003500 u64 temp_64;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003501
3502 if (!udev->slot_id) {
3503 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
3504 return -EINVAL;
3505 }
3506
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003507 virt_dev = xhci->devs[udev->slot_id];
3508
Matt Evans7ed603e2011-03-29 13:40:56 +11003509 if (WARN_ON(!virt_dev)) {
3510 /*
3511 * In plug/unplug torture test with an NEC controller,
3512 * a zero-dereference was observed once due to virt_dev = 0.
3513 * Print useful debug rather than crash if it is observed again!
3514 */
3515 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3516 udev->slot_id);
3517 return -EINVAL;
3518 }
3519
Andiry Xuf0615c42010-10-14 07:22:48 -07003520 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3521 /*
3522 * If this is the first Set Address since device plug-in or
 3523 * virt_device reallocation after a resume with an xHCI power loss,
3524 * then set up the slot context.
3525 */
3526 if (!slot_ctx->dev_info)
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003527 xhci_setup_addressable_virt_dev(xhci, udev);
Andiry Xuf0615c42010-10-14 07:22:48 -07003528 /* Otherwise, update the control endpoint ring enqueue pointer. */
Sarah Sharp2d1ee592010-07-09 17:08:54 +02003529 else
3530 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
Sarah Sharpd31c2852011-11-03 13:06:08 -07003531 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3532 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3533 ctrl_ctx->drop_flags = 0;
3534
Sarah Sharp66e49d82009-07-27 12:03:46 -07003535 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
John Yound115b042009-07-27 12:05:15 -07003536 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003537
Sarah Sharpf88ba782009-05-14 11:44:22 -07003538 spin_lock_irqsave(&xhci->lock, flags);
John Yound115b042009-07-27 12:05:15 -07003539 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
3540 udev->slot_id);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003541 if (ret) {
3542 spin_unlock_irqrestore(&xhci->lock, flags);
3543 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3544 return ret;
3545 }
Sarah Sharp23e3be12009-04-29 19:05:20 -07003546 xhci_ring_cmd_db(xhci);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003547 spin_unlock_irqrestore(&xhci->lock, flags);
3548
3549 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
3550 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3551 USB_CTRL_SET_TIMEOUT);
3552 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
3553 * the SetAddress() "recovery interval" required by USB and aborting the
3554 * command on a timeout.
3555 */
3556 if (timeleft <= 0) {
Andiry Xucd681762011-09-23 14:19:55 -07003557 xhci_warn(xhci, "%s while waiting for address device command\n",
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003558 timeleft == 0 ? "Timeout" : "Signal");
3559 /* FIXME cancel the address device command */
3560 return -ETIME;
3561 }
3562
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003563 switch (virt_dev->cmd_status) {
3564 case COMP_CTX_STATE:
3565 case COMP_EBADSLT:
3566 xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
3567 udev->slot_id);
3568 ret = -EINVAL;
3569 break;
3570 case COMP_TX_ERR:
3571 dev_warn(&udev->dev, "Device not responding to set address.\n");
3572 ret = -EPROTO;
3573 break;
Alex Hef6ba6fe2011-06-08 18:34:06 +08003574 case COMP_DEV_ERR:
3575 dev_warn(&udev->dev, "ERROR: Incompatible device for address "
3576 "device command.\n");
3577 ret = -ENODEV;
3578 break;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003579 case COMP_SUCCESS:
3580 xhci_dbg(xhci, "Successful Address Device command\n");
3581 break;
3582 default:
3583 xhci_err(xhci, "ERROR: unexpected command completion "
3584 "code 0x%x.\n", virt_dev->cmd_status);
Sarah Sharp66e49d82009-07-27 12:03:46 -07003585 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
John Yound115b042009-07-27 12:05:15 -07003586 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003587 ret = -EINVAL;
3588 break;
3589 }
3590 if (ret) {
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003591 return ret;
3592 }
Sarah Sharp8e595a52009-07-27 12:03:31 -07003593 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3594 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
3595 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
Matt Evans28ccd292011-03-29 13:40:46 +11003596 udev->slot_id,
3597 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3598 (unsigned long long)
3599 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07003600 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
John Yound115b042009-07-27 12:05:15 -07003601 (unsigned long long)virt_dev->out_ctx->dma);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003602 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
John Yound115b042009-07-27 12:05:15 -07003603 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003604 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
John Yound115b042009-07-27 12:05:15 -07003605 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003606 /*
3607 * USB core uses address 1 for the roothubs, so we add one to the
3608 * address given back to us by the HC.
3609 */
John Yound115b042009-07-27 12:05:15 -07003610 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
Andiry Xuc8d4af82010-10-14 07:22:51 -07003611 /* Use kernel assigned address for devices; store xHC assigned
3612 * address locally. */
Matt Evans28ccd292011-03-29 13:40:46 +11003613 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
3614 + 1;
Sarah Sharpf94e01862009-04-27 19:58:38 -07003615 /* Zero the input context control for later use */
John Yound115b042009-07-27 12:05:15 -07003616 ctrl_ctx->add_flags = 0;
3617 ctrl_ctx->drop_flags = 0;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003618
Andiry Xuc8d4af82010-10-14 07:22:51 -07003619 xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003620
3621 return 0;
3622}
3623
Andiry Xu95743232011-09-23 14:19:51 -07003624#ifdef CONFIG_USB_SUSPEND
3625
3626/* BESL to HIRD Encoding array for USB2 LPM */
3627static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
3628 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
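/* Table entries are exit-latency values in microseconds, indexed by the
 * 4-bit BESL encoding defined in the USB 2.0 LPM errata.
 */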
3629
3630/* Calculate HIRD/BESL for USB2 PORTPMSC*/
Andiry Xuf99298b2011-12-12 16:45:28 +08003631static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
3632 struct usb_device *udev)
Andiry Xu95743232011-09-23 14:19:51 -07003633{
Andiry Xuf99298b2011-12-12 16:45:28 +08003634 int u2del, besl, besl_host;
3635 int besl_device = 0;
3636 u32 field;
Andiry Xu95743232011-09-23 14:19:51 -07003637
Andiry Xuf99298b2011-12-12 16:45:28 +08003638 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
3639 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
3640
3641 if (field & USB_BESL_SUPPORT) {
3642 for (besl_host = 0; besl_host < 16; besl_host++) {
3643 if (xhci_besl_encoding[besl_host] >= u2del)
Andiry Xu95743232011-09-23 14:19:51 -07003644 break;
3645 }
Andiry Xuf99298b2011-12-12 16:45:28 +08003646 /* Use baseline BESL value as default */
3647 if (field & USB_BESL_BASELINE_VALID)
3648 besl_device = USB_GET_BESL_BASELINE(field);
3649 else if (field & USB_BESL_DEEP_VALID)
3650 besl_device = USB_GET_BESL_DEEP(field);
Andiry Xu95743232011-09-23 14:19:51 -07003651 } else {
3652 if (u2del <= 50)
Andiry Xuf99298b2011-12-12 16:45:28 +08003653 besl_host = 0;
Andiry Xu95743232011-09-23 14:19:51 -07003654 else
Andiry Xuf99298b2011-12-12 16:45:28 +08003655 besl_host = (u2del - 51) / 75 + 1;
Andiry Xu95743232011-09-23 14:19:51 -07003656 }
3657
Andiry Xuf99298b2011-12-12 16:45:28 +08003658 besl = besl_host + besl_device;
3659 if (besl > 15)
3660 besl = 15;
3661
3662 return besl;
Andiry Xu95743232011-09-23 14:19:51 -07003663}
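
/*
 * Worked example (illustrative): with a host U2 exit latency (u2del) of
 * 300 us and a device advertising a baseline BESL of 2, the loop picks
 * besl_host = 3 (the first encoding >= 300), so this returns
 * besl = 3 + 2 = 5.  Without BESL support, u2del = 300 takes the
 * (u2del - 51) / 75 + 1 branch instead, giving besl = 4.
 */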
3664
3665static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
3666 struct usb_device *udev)
3667{
3668 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3669 struct dev_info *dev_info;
3670 __le32 __iomem **port_array;
3671 __le32 __iomem *addr, *pm_addr;
3672 u32 temp, dev_id;
3673 unsigned int port_num;
3674 unsigned long flags;
Andiry Xuf99298b2011-12-12 16:45:28 +08003675 int hird;
Andiry Xu95743232011-09-23 14:19:51 -07003676 int ret;
3677
3678 if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
3679 !udev->lpm_capable)
3680 return -EINVAL;
3681
 3682 /* So far we only support LPM for non-hub devices connected to the root hub */
3683 if (!udev->parent || udev->parent->parent ||
3684 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3685 return -EINVAL;
3686
3687 spin_lock_irqsave(&xhci->lock, flags);
3688
3689 /* Look for devices in lpm_failed_devs list */
3690 dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
3691 le16_to_cpu(udev->descriptor.idProduct);
3692 list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
3693 if (dev_info->dev_id == dev_id) {
3694 ret = -EINVAL;
3695 goto finish;
3696 }
3697 }
3698
3699 port_array = xhci->usb2_ports;
3700 port_num = udev->portnum - 1;
3701
3702 if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
3703 xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
3704 ret = -EINVAL;
3705 goto finish;
3706 }
3707
3708 /*
3709 * Test USB 2.0 software LPM.
3710 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
3711 * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
3712 * in the June 2011 errata release.
3713 */
3714 xhci_dbg(xhci, "test port %d software LPM\n", port_num);
3715 /*
3716 * Set L1 Device Slot and HIRD/BESL.
3717 * Check device's USB 2.0 extension descriptor to determine whether
 3718 * HIRD or BESL should be used. See the USB 2.0 LPM errata.
3719 */
3720 pm_addr = port_array[port_num] + 1;
Andiry Xuf99298b2011-12-12 16:45:28 +08003721 hird = xhci_calculate_hird_besl(xhci, udev);
Andiry Xu95743232011-09-23 14:19:51 -07003722 temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
3723 xhci_writel(xhci, temp, pm_addr);
3724
3725 /* Set port link state to U2(L1) */
3726 addr = port_array[port_num];
3727 xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
3728
3729 /* wait for ACK */
3730 spin_unlock_irqrestore(&xhci->lock, flags);
3731 msleep(10);
3732 spin_lock_irqsave(&xhci->lock, flags);
3733
3734 /* Check L1 Status */
3735 ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
3736 if (ret != -ETIMEDOUT) {
3737 /* enter L1 successfully */
3738 temp = xhci_readl(xhci, addr);
3739 xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
3740 port_num, temp);
3741 ret = 0;
3742 } else {
3743 temp = xhci_readl(xhci, pm_addr);
3744 xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
3745 port_num, temp & PORT_L1S_MASK);
3746 ret = -EINVAL;
3747 }
3748
3749 /* Resume the port */
3750 xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
3751
3752 spin_unlock_irqrestore(&xhci->lock, flags);
3753 msleep(10);
3754 spin_lock_irqsave(&xhci->lock, flags);
3755
3756 /* Clear PLC */
3757 xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
3758
3759 /* Check PORTSC to make sure the device is in the right state */
3760 if (!ret) {
3761 temp = xhci_readl(xhci, addr);
3762 xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
3763 if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
3764 (temp & PORT_PLS_MASK) != XDEV_U0) {
3765 xhci_dbg(xhci, "port L1 resume fail\n");
3766 ret = -EINVAL;
3767 }
3768 }
3769
3770 if (ret) {
3771 /* Insert dev to lpm_failed_devs list */
3772 xhci_warn(xhci, "device LPM test failed, may disconnect and "
3773 "re-enumerate\n");
3774 dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
3775 if (!dev_info) {
3776 ret = -ENOMEM;
3777 goto finish;
3778 }
3779 dev_info->dev_id = dev_id;
3780 INIT_LIST_HEAD(&dev_info->list);
3781 list_add(&dev_info->list, &xhci->lpm_failed_devs);
3782 } else {
3783 xhci_ring_device(xhci, udev->slot_id);
3784 }
3785
3786finish:
3787 spin_unlock_irqrestore(&xhci->lock, flags);
3788 return ret;
3789}
3790
Andiry Xu65580b432011-09-23 14:19:52 -07003791int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
3792 struct usb_device *udev, int enable)
3793{
3794 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3795 __le32 __iomem **port_array;
3796 __le32 __iomem *pm_addr;
3797 u32 temp;
3798 unsigned int port_num;
3799 unsigned long flags;
Andiry Xuf99298b2011-12-12 16:45:28 +08003800 int hird;
Andiry Xu65580b432011-09-23 14:19:52 -07003801
3802 if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
3803 !udev->lpm_capable)
3804 return -EPERM;
3805
3806 if (!udev->parent || udev->parent->parent ||
3807 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3808 return -EPERM;
3809
3810 if (udev->usb2_hw_lpm_capable != 1)
3811 return -EPERM;
3812
3813 spin_lock_irqsave(&xhci->lock, flags);
3814
3815 port_array = xhci->usb2_ports;
3816 port_num = udev->portnum - 1;
3817 pm_addr = port_array[port_num] + 1;
3818 temp = xhci_readl(xhci, pm_addr);
3819
3820 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
3821 enable ? "enable" : "disable", port_num);
3822
Andiry Xuf99298b2011-12-12 16:45:28 +08003823 hird = xhci_calculate_hird_besl(xhci, udev);
Andiry Xu65580b432011-09-23 14:19:52 -07003824
3825 if (enable) {
3826 temp &= ~PORT_HIRD_MASK;
3827 temp |= PORT_HIRD(hird) | PORT_RWE;
3828 xhci_writel(xhci, temp, pm_addr);
3829 temp = xhci_readl(xhci, pm_addr);
3830 temp |= PORT_HLE;
3831 xhci_writel(xhci, temp, pm_addr);
3832 } else {
3833 temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
3834 xhci_writel(xhci, temp, pm_addr);
3835 }
3836
3837 spin_unlock_irqrestore(&xhci->lock, flags);
3838 return 0;
3839}
3840
Sarah Sharpb01bcbf2012-05-21 07:54:42 -07003841int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
3842{
3843 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3844 int ret;
3845
3846 ret = xhci_usb2_software_lpm_test(hcd, udev);
3847 if (!ret) {
3848 xhci_dbg(xhci, "software LPM test succeed\n");
3849 if (xhci->hw_lpm_support == 1) {
3850 udev->usb2_hw_lpm_capable = 1;
3851 ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
3852 if (!ret)
3853 udev->usb2_hw_lpm_enabled = 1;
3854 }
3855 }
3856
3857 return 0;
3858}
3859
3860#else
3861
3862int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
3863 struct usb_device *udev, int enable)
3864{
3865 return 0;
3866}
3867
3868int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
3869{
3870 return 0;
3871}
3872
3873#endif /* CONFIG_USB_SUSPEND */
3874
Sarah Sharp3b3db022012-05-09 10:55:03 -07003875/*---------------------- USB 3.0 Link PM functions ------------------------*/
3876
Sarah Sharpb01bcbf2012-05-21 07:54:42 -07003877#ifdef CONFIG_PM
Sarah Sharpe3567d22012-05-16 13:36:24 -07003878/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
3879static unsigned long long xhci_service_interval_to_ns(
3880 struct usb_endpoint_descriptor *desc)
3881{
3882 return (1 << (desc->bInterval - 1)) * 125 * 1000;
3883}
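
/* For example, bInterval = 4 gives (1 << 3) * 125 * 1000 = 1000000 ns (1 ms). */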
3884
Sarah Sharp3b3db022012-05-09 10:55:03 -07003885static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
3886 enum usb3_link_state state)
3887{
3888 unsigned long long sel;
3889 unsigned long long pel;
3890 unsigned int max_sel_pel;
3891 char *state_name;
3892
3893 switch (state) {
3894 case USB3_LPM_U1:
3895 /* Convert SEL and PEL stored in nanoseconds to microseconds */
3896 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
3897 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
3898 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
3899 state_name = "U1";
3900 break;
3901 case USB3_LPM_U2:
3902 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
3903 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
3904 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
3905 state_name = "U2";
3906 break;
3907 default:
3908 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
3909 __func__);
Sarah Sharpe25e62a2012-06-07 11:10:32 -07003910 return USB3_LPM_DISABLED;
Sarah Sharp3b3db022012-05-09 10:55:03 -07003911 }
3912
3913 if (sel <= max_sel_pel && pel <= max_sel_pel)
3914 return USB3_LPM_DEVICE_INITIATED;
3915
3916 if (sel > max_sel_pel)
3917 dev_dbg(&udev->dev, "Device-initiated %s disabled "
3918 "due to long SEL %llu ms\n",
3919 state_name, sel);
3920 else
3921 dev_dbg(&udev->dev, "Device-initiated %s disabled "
3922 "due to long PEL %llu\n ms",
3923 state_name, pel);
3924 return USB3_LPM_DISABLED;
3925}
3926
Sarah Sharpe3567d22012-05-16 13:36:24 -07003927/* Returns the hub-encoded U1 timeout value.
3928 * The U1 timeout should be the maximum of the following values:
3929 * - For control endpoints, U1 system exit latency (SEL) * 3
3930 * - For bulk endpoints, U1 SEL * 5
3931 * - For interrupt endpoints:
3932 * - Notification EPs, U1 SEL * 3
3933 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
3934 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
3935 */
3936static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,
3937 struct usb_endpoint_descriptor *desc)
3938{
3939 unsigned long long timeout_ns;
3940 int ep_type;
3941 int intr_type;
3942
3943 ep_type = usb_endpoint_type(desc);
3944 switch (ep_type) {
3945 case USB_ENDPOINT_XFER_CONTROL:
3946 timeout_ns = udev->u1_params.sel * 3;
3947 break;
3948 case USB_ENDPOINT_XFER_BULK:
3949 timeout_ns = udev->u1_params.sel * 5;
3950 break;
3951 case USB_ENDPOINT_XFER_INT:
3952 intr_type = usb_endpoint_interrupt_type(desc);
3953 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
3954 timeout_ns = udev->u1_params.sel * 3;
3955 break;
3956 }
3957 /* Otherwise the calculation is the same as isoc eps */
3958 case USB_ENDPOINT_XFER_ISOC:
3959 timeout_ns = xhci_service_interval_to_ns(desc);
Sarah Sharpc88db162012-05-21 08:44:33 -07003960 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
Sarah Sharpe3567d22012-05-16 13:36:24 -07003961 if (timeout_ns < udev->u1_params.sel * 2)
3962 timeout_ns = udev->u1_params.sel * 2;
3963 break;
3964 default:
3965 return 0;
3966 }
3967
3968 /* The U1 timeout is encoded in 1us intervals. */
Sarah Sharpc88db162012-05-21 08:44:33 -07003969 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
Sarah Sharpe3567d22012-05-16 13:36:24 -07003970 /* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */
3971 if (timeout_ns == USB3_LPM_DISABLED)
3972 timeout_ns++;
3973
3974 /* If the necessary timeout value is bigger than what we can set in the
3975 * USB 3.0 hub, we have to disable hub-initiated U1.
3976 */
3977 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
3978 return timeout_ns;
3979 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
3980 "due to long timeout %llu ms\n", timeout_ns);
3981 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
3982}
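
/*
 * Worked example (illustrative, with u1_params.sel stored in nanoseconds
 * as assumed in the SEL/PEL conversion above): a bulk endpoint on a
 * device with a 400 ns U1 SEL gets timeout_ns = 400 * 5 = 2000, which
 * encodes to DIV_ROUND_UP(2000, 1000) = 2, i.e. a 2 us hub-initiated
 * U1 timeout.
 */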
3983
3984/* Returns the hub-encoded U2 timeout value.
3985 * The U2 timeout should be the maximum of:
3986 * - 10 ms (to avoid the bandwidth impact on the scheduler)
3987 * - largest bInterval of any active periodic endpoint (to avoid going
3988 * into lower power link states between intervals).
3989 * - the U2 Exit Latency of the device
3990 */
3991static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
3992 struct usb_endpoint_descriptor *desc)
3993{
3994 unsigned long long timeout_ns;
3995 unsigned long long u2_del_ns;
3996
3997 timeout_ns = 10 * 1000 * 1000;
3998
3999 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4000 (xhci_service_interval_to_ns(desc) > timeout_ns))
4001 timeout_ns = xhci_service_interval_to_ns(desc);
4002
4003 u2_del_ns = udev->bos->ss_cap->bU2DevExitLat * 1000;
4004 if (u2_del_ns > timeout_ns)
4005 timeout_ns = u2_del_ns;
4006
4007 /* The U2 timeout is encoded in 256us intervals */
Sarah Sharpc88db162012-05-21 08:44:33 -07004008 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
Sarah Sharpe3567d22012-05-16 13:36:24 -07004009 /* If the necessary timeout value is bigger than what we can set in the
4010 * USB 3.0 hub, we have to disable hub-initiated U2.
4011 */
4012 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4013 return timeout_ns;
4014 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4015 "due to long timeout %llu ms\n", timeout_ns);
4016 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4017}
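
/*
 * Worked example (illustrative): an isochronous endpoint with
 * bInterval = 8 has a (1 << 7) * 125 us = 16 ms service interval, which
 * exceeds the 10 ms floor, so timeout_ns = 16000000 (assuming a smaller
 * bU2DevExitLat) and the encoded value is
 * DIV_ROUND_UP(16000000, 256 * 1000) = 63, in 256 us units.
 */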
4018
Sarah Sharp3b3db022012-05-09 10:55:03 -07004019static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4020 struct usb_device *udev,
4021 struct usb_endpoint_descriptor *desc,
4022 enum usb3_link_state state,
4023 u16 *timeout)
4024{
Sarah Sharpe3567d22012-05-16 13:36:24 -07004025 if (state == USB3_LPM_U1) {
4026 if (xhci->quirks & XHCI_INTEL_HOST)
4027 return xhci_calculate_intel_u1_timeout(udev, desc);
4028 } else {
4029 if (xhci->quirks & XHCI_INTEL_HOST)
4030 return xhci_calculate_intel_u2_timeout(udev, desc);
4031 }
4032
Sarah Sharp3b3db022012-05-09 10:55:03 -07004033 return USB3_LPM_DISABLED;
4034}
4035
4036static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4037 struct usb_device *udev,
4038 struct usb_endpoint_descriptor *desc,
4039 enum usb3_link_state state,
4040 u16 *timeout)
4041{
4042 u16 alt_timeout;
4043
4044 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4045 desc, state, timeout);
4046
4047 /* If we found we can't enable hub-initiated LPM, or
4048 * the U1 or U2 exit latency was too high to allow
4049 * device-initiated LPM as well, just stop searching.
4050 */
4051 if (alt_timeout == USB3_LPM_DISABLED ||
4052 alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4053 *timeout = alt_timeout;
4054 return -E2BIG;
4055 }
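/* Keep a running max; the slowest endpoint dictates the timeout. */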
4056 if (alt_timeout > *timeout)
4057 *timeout = alt_timeout;
4058 return 0;
4059}
4060
4061static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4062 struct usb_device *udev,
4063 struct usb_host_interface *alt,
4064 enum usb3_link_state state,
4065 u16 *timeout)
4066{
4067 int j;
4068
4069 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4070 if (xhci_update_timeout_for_endpoint(xhci, udev,
4071 &alt->endpoint[j].desc, state, timeout))
4072 return -E2BIG;
4074 }
4075 return 0;
4076}
4077
Sarah Sharpe3567d22012-05-16 13:36:24 -07004078static int xhci_check_intel_tier_policy(struct usb_device *udev,
4079 enum usb3_link_state state)
4080{
4081 struct usb_device *parent;
4082 unsigned int num_hubs;
4083
4084 if (state == USB3_LPM_U2)
4085 return 0;
4086
4087 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */
4088 for (parent = udev->parent, num_hubs = 0; parent->parent;
4089 parent = parent->parent)
4090 num_hubs++;
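/*
 * num_hubs counts the external hubs between the device and the root
 * hub: 0 for a device on a root port, 1 behind a first-tier hub, and
 * so on.  Only the first two cases keep U1 enabled.
 */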
4091
4092 if (num_hubs < 2)
4093 return 0;
4094
4095 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4096 " below second-tier hub.\n");
4097 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4098 "to decrease power consumption.\n");
4099 return -E2BIG;
4100}
4101
Sarah Sharp3b3db022012-05-09 10:55:03 -07004102static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4103 struct usb_device *udev,
4104 enum usb3_link_state state)
4105{
Sarah Sharpe3567d22012-05-16 13:36:24 -07004106 if (xhci->quirks & XHCI_INTEL_HOST)
4107 return xhci_check_intel_tier_policy(udev, state);
Sarah Sharp3b3db022012-05-09 10:55:03 -07004108 return -EINVAL;
4109}
4110
4111/* Returns the U1 or U2 timeout that should be enabled.
4112 * If the tier check or timeout setting functions return with a non-zero exit
4113 * code, that means the timeout value has been finalized and we shouldn't look
4114 * at any more endpoints.
4115 */
4116static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4117 struct usb_device *udev, enum usb3_link_state state)
4118{
4119 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4120 struct usb_host_config *config;
4121 char *state_name;
4122 int i;
4123 u16 timeout = USB3_LPM_DISABLED;
4124
4125 if (state == USB3_LPM_U1)
4126 state_name = "U1";
4127 else if (state == USB3_LPM_U2)
4128 state_name = "U2";
4129 else {
4130 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4131 state);
4132 return timeout;
4133 }
4134
4135 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4136 return timeout;
4137
4138 /* Gather some information about the currently installed configuration
4139 * and alternate interface settings.
4140 */
4141 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4142 state, &timeout))
4143 return timeout;
4144
4145 config = udev->actconfig;
4146 if (!config)
4147 return timeout;
4148
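/* Any bound driver or active endpoint below can veto hub-initiated
 * LPM or lengthen the timeout.
 */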
4149 for (i = 0; i < USB_MAXINTERFACES; i++) {
4150 struct usb_driver *driver;
4151 struct usb_interface *intf = config->interface[i];
4152
4153 if (!intf)
4154 continue;
4155
4156 /* Check if any currently bound drivers want hub-initiated LPM
4157 * disabled.
4158 */
4159 if (intf->dev.driver) {
4160 driver = to_usb_driver(intf->dev.driver);
4161 if (driver && driver->disable_hub_initiated_lpm) {
4162 dev_dbg(&udev->dev, "Hub-initiated %s disabled "
4163 "at request of driver %s\n",
4164 state_name, driver->name);
4165 return xhci_get_timeout_no_hub_lpm(udev, state);
4166 }
4167 }
4168
4169 /* Not sure how this could happen... */
4170 if (!intf->cur_altsetting)
4171 continue;
4172
4173 if (xhci_update_timeout_for_interface(xhci, udev,
4174 intf->cur_altsetting,
4175 state, &timeout))
4176 return timeout;
4177 }
4178 return timeout;
4179}
4180
4181/*
4182 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4183 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
4184 */
4185static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4186 struct usb_device *udev, u16 max_exit_latency)
4187{
4188 struct xhci_virt_device *virt_dev;
4189 struct xhci_command *command;
4190 struct xhci_input_control_ctx *ctrl_ctx;
4191 struct xhci_slot_ctx *slot_ctx;
4192 unsigned long flags;
4193 int ret;
4194
4195 spin_lock_irqsave(&xhci->lock, flags);
4196 if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
4197 spin_unlock_irqrestore(&xhci->lock, flags);
4198 return 0;
4199 }
4200
4201 /* Attempt to issue an Evaluate Context command to change the MEL. */
4202 virt_dev = xhci->devs[udev->slot_id];
4203 command = xhci->lpm_command;
4204 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4205 spin_unlock_irqrestore(&xhci->lock, flags);
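/* xhci_configure_endpoint() blocks on command completion, so the
 * lock cannot be held while the command is issued below.
 */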
4206
4207 ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
4208 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4209 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4210 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4211 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4212
4213 xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
4214 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
4215 xhci_dbg_ctx(xhci, command->in_ctx, 0);
4216
4217 /* Issue and wait for the evaluate context command. */
4218 ret = xhci_configure_endpoint(xhci, udev, command,
4219 true, true);
4220 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
4221 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
4222
4223 if (!ret) {
4224 spin_lock_irqsave(&xhci->lock, flags);
4225 virt_dev->current_mel = max_exit_latency;
4226 spin_unlock_irqrestore(&xhci->lock, flags);
4227 }
4228 return ret;
4229}
4230
4231static int calculate_max_exit_latency(struct usb_device *udev,
4232 enum usb3_link_state state_changed,
4233 u16 hub_encoded_timeout)
4234{
4235 unsigned long long u1_mel_us = 0;
4236 unsigned long long u2_mel_us = 0;
4237 unsigned long long mel_us = 0;
4238 bool disabling_u1;
4239 bool disabling_u2;
4240 bool enabling_u1;
4241 bool enabling_u2;
4242
4243 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4244 hub_encoded_timeout == USB3_LPM_DISABLED);
4245 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4246 hub_encoded_timeout == USB3_LPM_DISABLED);
4247
4248 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4249 hub_encoded_timeout != USB3_LPM_DISABLED);
4250 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4251 hub_encoded_timeout != USB3_LPM_DISABLED);
4252
4253 /* If U1 was already enabled and we're not disabling it,
4254 * or we're going to enable U1, account for the U1 max exit latency.
4255 */
4256 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4257 enabling_u1)
4258 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4259 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4260 enabling_u2)
4261 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4262
4263 if (u1_mel_us > u2_mel_us)
4264 mel_us = u1_mel_us;
4265 else
4266 mel_us = u2_mel_us;
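/* For example, enabling U2 on a device with u2_params.mel = 2300 ns
 * (and U1 disabled) gives mel_us = DIV_ROUND_UP(2300, 1000) = 3 us.
 */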
4267 /* xHCI host controller max exit latency field is only 16 bits wide. */
4268 if (mel_us > MAX_EXIT) {
4269 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
4270 "is too big.\n", mel_us);
4271 return -E2BIG;
4272 }
4273 return mel_us;
4274}
4275
4276/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
4277int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4278 struct usb_device *udev, enum usb3_link_state state)
4279{
4280 struct xhci_hcd *xhci;
4281 u16 hub_encoded_timeout;
4282 int mel;
4283 int ret;
4284
4285 xhci = hcd_to_xhci(hcd);
4286 /* The LPM timeout values are pretty host-controller specific, so don't
4287 * enable hub-initiated timeouts unless the vendor has provided
4288 * information about their timeout algorithm.
4289 */
4290 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4291 !xhci->devs[udev->slot_id])
4292 return USB3_LPM_DISABLED;
4293
4294 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4295 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4296 if (mel < 0) {
4297 /* Max Exit Latency is too big, disable LPM. */
4298 hub_encoded_timeout = USB3_LPM_DISABLED;
4299 mel = 0;
4300 }
4301
4302 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4303 if (ret)
4304 return ret;
4305 return hub_encoded_timeout;
4306}
4307
4308int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4309 struct usb_device *udev, enum usb3_link_state state)
4310{
4311 struct xhci_hcd *xhci;
4312 u16 mel;
4313 int ret;
4314
4315 xhci = hcd_to_xhci(hcd);
4316 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4317 !xhci->devs[udev->slot_id])
4318 return 0;
4319
4320 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4321 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4322 if (ret)
4323 return ret;
4324 return 0;
4325}
Sarah Sharpb01bcbf2012-05-21 07:54:42 -07004326#else /* CONFIG_PM */
4327
4328int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4329 struct usb_device *udev, enum usb3_link_state state)
4330{
4331 return USB3_LPM_DISABLED;
4332}
4333
4334int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4335 struct usb_device *udev, enum usb3_link_state state)
4336{
4337 return 0;
4338}
4339#endif /* CONFIG_PM */
4340
Sarah Sharp3b3db022012-05-09 10:55:03 -07004341/*-------------------------------------------------------------------------*/
4342
Sarah Sharpac1c1b72009-09-04 10:53:20 -07004343/* Once a hub descriptor is fetched for a device, we need to update the xHC's
4344 * internal data structures for the device.
4345 */
4346int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
4347 struct usb_tt *tt, gfp_t mem_flags)
4348{
4349 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4350 struct xhci_virt_device *vdev;
4351 struct xhci_command *config_cmd;
4352 struct xhci_input_control_ctx *ctrl_ctx;
4353 struct xhci_slot_ctx *slot_ctx;
4354 unsigned long flags;
4355 unsigned think_time;
4356 int ret;
4357
4358 /* Ignore root hubs */
4359 if (!hdev->parent)
4360 return 0;
4361
4362 vdev = xhci->devs[hdev->slot_id];
4363 if (!vdev) {
4364 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
4365 return -EINVAL;
4366 }
Sarah Sharpa1d78c12009-12-09 15:59:03 -08004367 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
Sarah Sharpac1c1b72009-09-04 10:53:20 -07004368 if (!config_cmd) {
4369 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
4370 return -ENOMEM;
4371 }
4372
4373 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharp839c8172011-09-02 11:05:47 -07004374 if (hdev->speed == USB_SPEED_HIGH &&
4375 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
4376 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
4377 xhci_free_command(xhci, config_cmd);
4378 spin_unlock_irqrestore(&xhci->lock, flags);
4379 return -ENOMEM;
4380 }
4381
Sarah Sharpac1c1b72009-09-04 10:53:20 -07004382 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
4383 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11004384 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
Sarah Sharpac1c1b72009-09-04 10:53:20 -07004385 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11004386 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
Sarah Sharpac1c1b72009-09-04 10:53:20 -07004387 if (tt->multi)
Matt Evans28ccd292011-03-29 13:40:46 +11004388 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
Sarah Sharpac1c1b72009-09-04 10:53:20 -07004389 if (xhci->hci_version > 0x95) {
4390 xhci_dbg(xhci, "xHCI version %x needs hub "
4391 "TT think time and number of ports\n",
4392 (unsigned int) xhci->hci_version);
Matt Evans28ccd292011-03-29 13:40:46 +11004393 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
Sarah Sharpac1c1b72009-09-04 10:53:20 -07004394 /* Set TT think time - convert from ns to FS bit times.
4395 * 0 = 8 FS bit times, 1 = 16 FS bit times,
4396 * 2 = 24 FS bit times, 3 = 32 FS bit times.
Andiry Xu700b4172011-05-05 18:14:05 +08004397 *
4398 * xHCI 1.0: this field shall be 0 if the device is not a
4399	 * High-speed hub.
Sarah Sharpac1c1b72009-09-04 10:53:20 -07004400 */
4401 think_time = tt->think_time;
4402 if (think_time != 0)
4403 think_time = (think_time / 666) - 1;
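/* e.g. tt->think_time = 1332 ns (16 FS bit times) encodes as
 * (1332 / 666) - 1 = 1.
 */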
Andiry Xu700b4172011-05-05 18:14:05 +08004404 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
4405 slot_ctx->tt_info |=
4406 cpu_to_le32(TT_THINK_TIME(think_time));
Sarah Sharpac1c1b72009-09-04 10:53:20 -07004407 } else {
4408 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
4409 "TT think time or number of ports\n",
4410 (unsigned int) xhci->hci_version);
4411 }
4412 slot_ctx->dev_state = 0;
4413 spin_unlock_irqrestore(&xhci->lock, flags);
4414
4415 xhci_dbg(xhci, "Set up %s for hub device.\n",
4416 (xhci->hci_version > 0x95) ?
4417 "configure endpoint" : "evaluate context");
4418 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
4419 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
4420
4421 /* Issue and wait for the configure endpoint or
4422 * evaluate context command.
4423 */
4424 if (xhci->hci_version > 0x95)
4425 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4426 false, false);
4427 else
4428 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4429 true, false);
4430
4431 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
4432 xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
4433
4434 xhci_free_command(xhci, config_cmd);
4435 return ret;
4436}
4437
Sarah Sharp66d4ead2009-04-27 19:52:28 -07004438int xhci_get_frame(struct usb_hcd *hcd)
4439{
4440 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4441 /* EHCI mods by the periodic size. Why? */
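/* MFINDEX counts 125 us microframes; shifting right by 3 converts
 * to 1 ms frame numbers.
 */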
4442 return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
4443}
4444
Sebastian Andrzej Siewior552e0c42011-09-23 14:20:01 -07004445int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4446{
4447 struct xhci_hcd *xhci;
4448 struct device *dev = hcd->self.controller;
4449 int retval;
4450 u32 temp;
4451
Andiry Xufdaf8b32012-03-05 17:49:38 +08004452 /* Accept arbitrarily long scatter-gather lists */
4453 hcd->self.sg_tablesize = ~0;
Hans de Goede19181bc2012-07-04 09:18:02 +02004454 /* XHCI controllers don't stop the ep queue on short packets :| */
4455 hcd->self.no_stop_on_short = 1;
Sebastian Andrzej Siewior552e0c42011-09-23 14:20:01 -07004456
4457 if (usb_hcd_is_primary_hcd(hcd)) {
4458 xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
4459 if (!xhci)
4460 return -ENOMEM;
4461 *((struct xhci_hcd **) hcd->hcd_priv) = xhci;
4462 xhci->main_hcd = hcd;
4463 /* Mark the first roothub as being USB 2.0.
4464 * The xHCI driver will register the USB 3.0 roothub.
4465 */
4466 hcd->speed = HCD_USB2;
4467 hcd->self.root_hub->speed = USB_SPEED_HIGH;
4468 /*
4469 * USB 2.0 roothub under xHCI has an integrated TT,
4470 * (rate matching hub) as opposed to having an OHCI/UHCI
4471 * companion controller.
4472 */
4473 hcd->has_tt = 1;
4474 } else {
4475 /* xHCI private pointer was set in xhci_pci_probe for the second
4476 * registered roothub.
4477 */
4478 xhci = hcd_to_xhci(hcd);
4479 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4480 if (HCC_64BIT_ADDR(temp)) {
4481 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4482 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
4483 } else {
4484 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
4485 }
4486 return 0;
4487 }
4488
4489 xhci->cap_regs = hcd->regs;
4490 xhci->op_regs = hcd->regs +
4491 HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
4492 xhci->run_regs = hcd->regs +
4493 (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
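/* The operational registers follow the capability block (CAPLENGTH
 * bytes in) and the runtime registers sit at the Runtime Register
 * Space Offset; both offsets are relative to the mapped BAR.
 */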
4494 /* Cache read-only capability registers */
4495 xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
4496 xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
4497 xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
4498 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
4499 xhci->hci_version = HC_VERSION(xhci->hcc_params);
4500 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4501 xhci_print_registers(xhci);
4502
4503 get_quirks(dev, xhci);
4504
4505 /* Make sure the HC is halted. */
4506 retval = xhci_halt(xhci);
4507 if (retval)
4508 goto error;
4509
4510 xhci_dbg(xhci, "Resetting HCD\n");
4511 /* Reset the internal HC memory state and registers. */
4512 retval = xhci_reset(xhci);
4513 if (retval)
4514 goto error;
4515 xhci_dbg(xhci, "Reset complete\n");
4516
4517 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4518 if (HCC_64BIT_ADDR(temp)) {
4519 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4520 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
4521 } else {
4522 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
4523 }
4524
4525 xhci_dbg(xhci, "Calling HCD init\n");
4526 /* Initialize HCD and host controller data structures. */
4527 retval = xhci_init(hcd);
4528 if (retval)
4529 goto error;
4530 xhci_dbg(xhci, "Called HCD init\n");
4531 return 0;
4532error:
4533 kfree(xhci);
4534 return retval;
4535}
4536
Sarah Sharp66d4ead2009-04-27 19:52:28 -07004537MODULE_DESCRIPTION(DRIVER_DESC);
4538MODULE_AUTHOR(DRIVER_AUTHOR);
4539MODULE_LICENSE("GPL");
4540
4541static int __init xhci_hcd_init(void)
4542{
Sebastian Andrzej Siewior0cc47d52011-09-23 14:20:02 -07004543 int retval;
Sarah Sharp66d4ead2009-04-27 19:52:28 -07004544
4545 retval = xhci_register_pci();
Sarah Sharp66d4ead2009-04-27 19:52:28 -07004546 if (retval < 0) {
4547	printk(KERN_DEBUG "Problem registering PCI driver.\n");
4548 return retval;
4549 }
Sebastian Andrzej Siewior3429e912012-03-13 16:57:41 +02004550 retval = xhci_register_plat();
4551 if (retval < 0) {
4552	printk(KERN_DEBUG "Problem registering platform driver.\n");
4553 goto unreg_pci;
4554 }
Sarah Sharp98441972009-05-14 11:44:18 -07004555 /*
4556 * Check the compiler generated sizes of structures that must be laid
4557 * out in specific ways for hardware access.
4558 */
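/* Each expected size is (number of 32-bit registers) * 32 bits / 8
 * bits per byte, e.g. the doorbell array is 256 * 32 / 8 = 1024
 * bytes.
 */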
4559 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
4560 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
4561 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
4562 /* xhci_device_control has eight fields, and also
4563 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
4564 */
Sarah Sharp98441972009-05-14 11:44:18 -07004565 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
4566 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
4567 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
4568 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
4569 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
4570 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
4571 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
Sarah Sharp66d4ead2009-04-27 19:52:28 -07004572 return 0;
Sebastian Andrzej Siewior3429e912012-03-13 16:57:41 +02004573unreg_pci:
4574 xhci_unregister_pci();
4575 return retval;
Sarah Sharp66d4ead2009-04-27 19:52:28 -07004576}
4577module_init(xhci_hcd_init);
4578
4579static void __exit xhci_hcd_cleanup(void)
4580{
Sarah Sharp66d4ead2009-04-27 19:52:28 -07004581 xhci_unregister_pci();
Sebastian Andrzej Siewior3429e912012-03-13 16:57:41 +02004582 xhci_unregister_plat();
Sarah Sharp66d4ead2009-04-27 19:52:28 -07004583}
4584module_exit(xhci_hcd_cleanup);