/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
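/*
 * Usage sketch (added for illustration, not in the original source):
 * because of the S_IRUGO | S_IWUSR permissions above, the quirk can be
 * forced on either at load time or at runtime, assuming the driver is
 * built as the "xhci-hcd" module:
 *
 *	modprobe xhci-hcd link_quirk=1
 *	echo 1 > /sys/module/xhci_hcd/parameters/link_quirk
 */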

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: the "usec" timeout has
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
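/*
 * Illustrative only (added; not part of the original file): handshake()
 * busy-waits in 1 us steps, so a caller that needs to wait up to 250 ms
 * for the Controller Not Ready flag to clear would do:
 *
 *	ret = handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
 *
 * Zero means the flag cleared in time; -ETIMEDOUT or -ENODEV means it did
 * not (or the host was removed).  xhci_reset() below uses exactly this
 * pattern.
 */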

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}

/*
 * Free all the IRQs that were requested (MSI or MSI-X); returns early if
 * the legacy interrupt line is in use.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			if (xhci->msix_entries[i].vector)
				free_irq(xhci->msix_entries[i].vector,
						xhci_to_hcd(xhci));
	} else if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_err(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors to request:
	 * - HCS_MAX_INTRS: the maximum number of interrupters the host
	 *   controller supports, from the xHCI HCSPARAMS1 register.
	 * - num_online_cpus() + 1: one vector per online CPU core, plus
	 *   one extra so an interrupt vector is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));
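	/*
	 * Worked example (added for clarity, not in the original): on a
	 * 4-CPU machine whose host advertises 8 interrupters in
	 * HCSPARAMS1, this requests min(4 + 1, 8) = 5 MSI-X vectors.
	 */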

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_err(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*-------------------------------------------------------------------------*/


#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		return 0;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is -1, we have MSI */
		return 0;

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Set up MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
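	/*
	 * Note (added; based on the xHCI specification rather than this
	 * file): the interrupt moderation interval is in 250 ns units, so
	 * the value 160 written above throttles the interrupter to one
	 * interrupt per 160 * 250 ns = 40 us, i.e. at most ~25,000
	 * interrupts per second.
	 */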

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
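/*
 * Added note (from the xHCI specification, not this file): the low bits
 * masked by CMD_RING_RSVD_BITS are not part of the dequeue pointer; they
 * hold the ring cycle state and the command stop/abort/running flags.
 * That is why the function above preserves them from the old register
 * value and ORs the 64-byte-aligned pointer and cycle bit around them.
 */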

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0, SEGMENT_SIZE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	int i;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}

	return rc;
}

/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10*100)) {
			xhci_dbg(xhci, "WARN: xHC CMD_CRS timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (retval)
			goto failed_restart;

		xhci_dbg(xhci, "Start the secondary HCD\n");
		retval = xhci_run(secondary_hcd);
		if (!retval) {
			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			set_bit(HCD_FLAG_HW_ACCESSIBLE,
					&xhci->shared_hcd->flags);
		}
failed_restart:
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		return retval;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_unlock_irq(&xhci->lock);
	return 0;
}
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
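/*
 * Worked example (added for clarity, not in the original): bulk endpoint
 * 0x81 (EP 1 IN) gives index = (1 * 2) + 1 - 1 = 2; bulk endpoint 0x02
 * (EP 2 OUT) gives index = (2 * 2) + 0 - 1 = 3.
 */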

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
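/*
 * Continuing the example above (added, not in the original): endpoint
 * 0x81 has index 2, so its add/drop flag is 1 << 3 = 0x8; the slot
 * context flag (SLOT_FLAG) is bit 0 and EP0's flag (EP0_FLAG) is bit 1.
 */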

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
					"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and "
					"virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}
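/*
 * Background note (added; from the USB 2.0 spec rather than this file):
 * a full-speed device may report an ep0 wMaxPacketSize of 8, 16, 32, or
 * 64 bytes, but enumeration starts by reading only the first 8 bytes of
 * the device descriptor, so the xHC's ep0 context may have been set up
 * with a provisional value that the function above corrects afterwards.
 */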

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);

	urb_priv = urb->hcpriv;

	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1412
Sarah Sharpf94e01862009-04-27 19:58:38 -07001413 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1414 (unsigned int) ep->desc.bEndpointAddress,
1415 udev->slot_id,
1416 (unsigned int) new_drop_flags,
1417 (unsigned int) new_add_flags,
1418 (unsigned int) new_slot_info);
1419 return 0;
1420}
1421
1422/* Add an endpoint to a new possible bandwidth configuration for this device.
1423 * Only one call to this function is allowed per endpoint before
1424 * check_bandwidth() or reset_bandwidth() must be called.
1425 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1426 * add the endpoint to the schedule with possibly new parameters denoted by a
1427 * different endpoint descriptor in usb_host_endpoint.
1428 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1429 * not allowed.
Sarah Sharpf88ba782009-05-14 11:44:22 -07001430 *
1431 * The USB core will not allow URBs to be queued to an endpoint until the
1432 * configuration or alt setting is installed in the device, so there's no need
1433 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
Sarah Sharpf94e01862009-04-27 19:58:38 -07001434 */
1435int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1436 struct usb_host_endpoint *ep)
1437{
Sarah Sharpf94e01862009-04-27 19:58:38 -07001438 struct xhci_hcd *xhci;
John Yound115b042009-07-27 12:05:15 -07001439 struct xhci_container_ctx *in_ctx, *out_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001440 unsigned int ep_index;
1441 struct xhci_ep_ctx *ep_ctx;
John Yound115b042009-07-27 12:05:15 -07001442 struct xhci_slot_ctx *slot_ctx;
1443 struct xhci_input_control_ctx *ctrl_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001444 u32 added_ctxs;
1445 unsigned int last_ctx;
1446 u32 new_add_flags, new_drop_flags, new_slot_info;
Sarah Sharpfa75ac32011-06-05 23:10:04 -07001447 struct xhci_virt_device *virt_dev;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001448 int ret = 0;
1449
Andiry Xu64927732010-10-14 07:22:45 -07001450 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
Sarah Sharpa1587d92009-07-27 12:03:15 -07001451 if (ret <= 0) {
1452 /* So we won't queue a reset ep command for a root hub */
1453 ep->hcpriv = NULL;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001454 return ret;
Sarah Sharpa1587d92009-07-27 12:03:15 -07001455 }
Sarah Sharpf94e01862009-04-27 19:58:38 -07001456 xhci = hcd_to_xhci(hcd);
Sarah Sharpfe6c6c12011-05-23 16:41:17 -07001457 if (xhci->xhc_state & XHCI_STATE_DYING)
1458 return -ENODEV;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001459
1460 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1461 last_ctx = xhci_last_valid_endpoint(added_ctxs);
1462 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1463 /* FIXME when we have to issue an evaluate endpoint command to
1464 * deal with ep0 max packet size changing once we get the
1465 * descriptors
1466 */
1467 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1468 __func__, added_ctxs);
1469 return 0;
1470 }
1471
Sarah Sharpfa75ac32011-06-05 23:10:04 -07001472 virt_dev = xhci->devs[udev->slot_id];
1473 in_ctx = virt_dev->in_ctx;
1474 out_ctx = virt_dev->out_ctx;
John Yound115b042009-07-27 12:05:15 -07001475 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001476 ep_index = xhci_get_endpoint_index(&ep->desc);
John Yound115b042009-07-27 12:05:15 -07001477 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
Sarah Sharpfa75ac32011-06-05 23:10:04 -07001478
1479 /* If this endpoint is already in use, and the upper layers are trying
1480 * to add it again without dropping it, reject the addition.
1481 */
1482 if (virt_dev->eps[ep_index].ring &&
1483 !(le32_to_cpu(ctrl_ctx->drop_flags) &
1484 xhci_get_endpoint_flag(&ep->desc))) {
1485 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1486 "without dropping it.\n",
1487 (unsigned int) ep->desc.bEndpointAddress);
1488 return -EINVAL;
1489 }
1490
Sarah Sharpf94e01862009-04-27 19:58:38 -07001491 /* If the HCD has already noted the endpoint is enabled,
1492 * ignore this request.
1493 */
Matt Evans28ccd292011-03-29 13:40:46 +11001494 if (le32_to_cpu(ctrl_ctx->add_flags) &
1495 xhci_get_endpoint_flag(&ep->desc)) {
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07001496 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1497 __func__, ep);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001498 return 0;
1499 }
1500
Sarah Sharpf88ba782009-05-14 11:44:22 -07001501 /*
1502 * Configuration and alternate setting changes must be done in
1503	 * process context, not interrupt context (or so the documentation
1504	 * for usb_set_interface() and usb_set_configuration() claims).
1505 */
Sarah Sharpfa75ac32011-06-05 23:10:04 -07001506 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
Sarah Sharpf94e01862009-04-27 19:58:38 -07001507 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1508 __func__, ep->desc.bEndpointAddress);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001509 return -ENOMEM;
1510 }
1511
Matt Evans28ccd292011-03-29 13:40:46 +11001512 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1513 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001514
1515 /* If xhci_endpoint_disable() was called for this endpoint, but the
1516 * xHC hasn't been notified yet through the check_bandwidth() call,
1517 * this re-adds a new state for the endpoint from the new endpoint
1518 * descriptors. We must drop and re-add this endpoint, so we leave the
1519 * drop flags alone.
1520 */
Matt Evans28ccd292011-03-29 13:40:46 +11001521 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001522
John Yound115b042009-07-27 12:05:15 -07001523 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001524 /* Update the last valid endpoint context, if we just added one past */
Matt Evans28ccd292011-03-29 13:40:46 +11001525 if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
1526 LAST_CTX(last_ctx)) {
1527 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1528 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001529 }
Matt Evans28ccd292011-03-29 13:40:46 +11001530 new_slot_info = le32_to_cpu(slot_ctx->dev_info);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001531
Sarah Sharpa1587d92009-07-27 12:03:15 -07001532 /* Store the usb_device pointer for later use */
1533 ep->hcpriv = udev;
1534
Sarah Sharpf94e01862009-04-27 19:58:38 -07001535 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1536 (unsigned int) ep->desc.bEndpointAddress,
1537 udev->slot_id,
1538 (unsigned int) new_drop_flags,
1539 (unsigned int) new_add_flags,
1540 (unsigned int) new_slot_info);
1541 return 0;
1542}
1543
John Yound115b042009-07-27 12:05:15 -07001544static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
Sarah Sharpf94e01862009-04-27 19:58:38 -07001545{
John Yound115b042009-07-27 12:05:15 -07001546 struct xhci_input_control_ctx *ctrl_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001547 struct xhci_ep_ctx *ep_ctx;
John Yound115b042009-07-27 12:05:15 -07001548 struct xhci_slot_ctx *slot_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001549 int i;
1550
1551 /* When a device's add flag and drop flag are zero, any subsequent
1552 * configure endpoint command will leave that endpoint's state
1553 * untouched. Make sure we don't leave any old state in the input
1554 * endpoint contexts.
1555 */
John Yound115b042009-07-27 12:05:15 -07001556 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1557 ctrl_ctx->drop_flags = 0;
1558 ctrl_ctx->add_flags = 0;
1559 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11001560 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001561 /* Endpoint 0 is always valid */
Matt Evans28ccd292011-03-29 13:40:46 +11001562 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001563 for (i = 1; i < 31; ++i) {
John Yound115b042009-07-27 12:05:15 -07001564 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001565 ep_ctx->ep_info = 0;
1566 ep_ctx->ep_info2 = 0;
Sarah Sharp8e595a52009-07-27 12:03:31 -07001567 ep_ctx->deq = 0;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001568 ep_ctx->tx_info = 0;
1569 }
1570}
1571
Sarah Sharpf2217e82009-08-07 14:04:43 -07001572static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
Sarah Sharp00161f72011-04-28 12:23:23 -07001573 struct usb_device *udev, u32 *cmd_status)
Sarah Sharpf2217e82009-08-07 14:04:43 -07001574{
1575 int ret;
1576
Sarah Sharp913a8a32009-09-04 10:53:13 -07001577 switch (*cmd_status) {
Sarah Sharpf2217e82009-08-07 14:04:43 -07001578 case COMP_ENOMEM:
1579 dev_warn(&udev->dev, "Not enough host controller resources "
1580 "for new device state.\n");
1581 ret = -ENOMEM;
1582 /* FIXME: can we allocate more resources for the HC? */
1583 break;
1584 case COMP_BW_ERR:
1585 dev_warn(&udev->dev, "Not enough bandwidth "
1586 "for new device state.\n");
1587 ret = -ENOSPC;
1588 /* FIXME: can we go back to the old state? */
1589 break;
1590 case COMP_TRB_ERR:
1591 /* the HCD set up something wrong */
1592 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1593 "add flag = 1, "
1594 "and endpoint is not disabled.\n");
1595 ret = -EINVAL;
1596 break;
Alex Hef6ba6fe2011-06-08 18:34:06 +08001597 case COMP_DEV_ERR:
1598 dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
1599 "configure command.\n");
1600 ret = -ENODEV;
1601 break;
Sarah Sharpf2217e82009-08-07 14:04:43 -07001602 case COMP_SUCCESS:
1603 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1604 ret = 0;
1605 break;
1606 default:
1607 xhci_err(xhci, "ERROR: unexpected command completion "
Sarah Sharp913a8a32009-09-04 10:53:13 -07001608 "code 0x%x.\n", *cmd_status);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001609 ret = -EINVAL;
1610 break;
1611 }
1612 return ret;
1613}
1614
1615static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
Sarah Sharp00161f72011-04-28 12:23:23 -07001616 struct usb_device *udev, u32 *cmd_status)
Sarah Sharpf2217e82009-08-07 14:04:43 -07001617{
1618 int ret;
Sarah Sharp913a8a32009-09-04 10:53:13 -07001619 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
Sarah Sharpf2217e82009-08-07 14:04:43 -07001620
Sarah Sharp913a8a32009-09-04 10:53:13 -07001621 switch (*cmd_status) {
Sarah Sharpf2217e82009-08-07 14:04:43 -07001622 case COMP_EINVAL:
1623 dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1624 "context command.\n");
1625 ret = -EINVAL;
1626 break;
1627 case COMP_EBADSLT:
1628		dev_warn(&udev->dev, "WARN: slot not enabled for "
1629				"evaluate context command.\n");
		ret = -EINVAL;
		break;
1630 case COMP_CTX_STATE:
1631 dev_warn(&udev->dev, "WARN: invalid context state for "
1632 "evaluate context command.\n");
1633 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1634 ret = -EINVAL;
1635 break;
Alex Hef6ba6fe2011-06-08 18:34:06 +08001636 case COMP_DEV_ERR:
1637 dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
1638 "context command.\n");
1639 ret = -ENODEV;
1640 break;
Alex He1bb73a82011-05-05 18:14:12 +08001641 case COMP_MEL_ERR:
1642 /* Max Exit Latency too large error */
1643 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1644 ret = -EINVAL;
1645 break;
Sarah Sharpf2217e82009-08-07 14:04:43 -07001646 case COMP_SUCCESS:
1647 dev_dbg(&udev->dev, "Successful evaluate context command\n");
1648 ret = 0;
1649 break;
1650 default:
1651 xhci_err(xhci, "ERROR: unexpected command completion "
Sarah Sharp913a8a32009-09-04 10:53:13 -07001652 "code 0x%x.\n", *cmd_status);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001653 ret = -EINVAL;
1654 break;
1655 }
1656 return ret;
1657}
1658
Sarah Sharp2cf95c12011-05-11 16:14:58 -07001659static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1660 struct xhci_container_ctx *in_ctx)
1661{
1662 struct xhci_input_control_ctx *ctrl_ctx;
1663 u32 valid_add_flags;
1664 u32 valid_drop_flags;
1665
1666 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1667 /* Ignore the slot flag (bit 0), and the default control endpoint flag
1668 * (bit 1). The default control endpoint is added during the Address
1669 * Device command and is never removed until the slot is disabled.
1670 */
1671	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1672	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1673
1674 /* Use hweight32 to count the number of ones in the add flags, or
1675 * number of endpoints added. Don't count endpoints that are changed
1676 * (both added and dropped).
1677 */
1678 return hweight32(valid_add_flags) -
1679 hweight32(valid_add_flags & valid_drop_flags);
1680}
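
/*
 * Worked example (hypothetical flag values): with add_flags = 0x19 (slot
 * flag plus two endpoint flags) and drop_flags = 0x08, the shifts above
 * yield valid_add_flags = 0x6 and valid_drop_flags = 0x2.  One endpoint is
 * both added and dropped (changed in place), so this returns
 * hweight32(0x6) - hweight32(0x6 & 0x2) = 2 - 1 = 1 newly added endpoint.
 */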
1681
1682static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1683 struct xhci_container_ctx *in_ctx)
1684{
1685 struct xhci_input_control_ctx *ctrl_ctx;
1686 u32 valid_add_flags;
1687 u32 valid_drop_flags;
1688
1689 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1690	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1691	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1692
1693 return hweight32(valid_drop_flags) -
1694 hweight32(valid_add_flags & valid_drop_flags);
1695}
1696
1697/*
1698 * We need to reserve the new number of endpoints before the configure endpoint
1699 * command completes. We can't subtract the dropped endpoints from the number
1700 * of active endpoints until the command completes because we can oversubscribe
1701 * the host in this case:
1702 *
1703 * - the first configure endpoint command drops more endpoints than it adds
1704 * - a second configure endpoint command that adds more endpoints is queued
1705 * - the first configure endpoint command fails, so the config is unchanged
1706 * - the second command may succeed, even though there aren't enough resources
1707 *
1708 * Must be called with xhci->lock held.
1709 */
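/*
 * Numeric illustration (made-up numbers): suppose 60 of limit_active_eps =
 * 64 contexts are active.  Command A drops 8 endpoints and adds none;
 * command B adds 10.  If we subtracted A's drops immediately (60 - 8 = 52),
 * B's reservation (52 + 10 = 62) would appear to fit -- but if A then
 * fails, the host really has 60 + 10 = 70 > 64 active endpoint contexts.
 * Deferring the subtraction until A completes avoids that.
 */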
1710static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1711 struct xhci_container_ctx *in_ctx)
1712{
1713 u32 added_eps;
1714
1715 added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1716 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1717 xhci_dbg(xhci, "Not enough ep ctxs: "
1718 "%u active, need to add %u, limit is %u.\n",
1719 xhci->num_active_eps, added_eps,
1720 xhci->limit_active_eps);
1721 return -ENOMEM;
1722 }
1723 xhci->num_active_eps += added_eps;
1724 xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
1725 xhci->num_active_eps);
1726 return 0;
1727}
1728
1729/*
1730 * The configure endpoint command failed for some other reason, so we need
1731 * to revert the resources that the failed configuration would have used.
1732 *
1733 * Must be called with xhci->lock held.
1734 */
1735static void xhci_free_host_resources(struct xhci_hcd *xhci,
1736 struct xhci_container_ctx *in_ctx)
1737{
1738 u32 num_failed_eps;
1739
1740 num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1741 xhci->num_active_eps -= num_failed_eps;
1742 xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
1743 num_failed_eps,
1744 xhci->num_active_eps);
1745}
1746
1747/*
1748 * Now that the command has completed, clean up the active endpoint count by
1749 * subtracting out the endpoints that were dropped (but not changed).
1750 *
1751 * Must be called with xhci->lock held.
1752 */
1753static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1754 struct xhci_container_ctx *in_ctx)
1755{
1756 u32 num_dropped_eps;
1757
1758 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
1759 xhci->num_active_eps -= num_dropped_eps;
1760 if (num_dropped_eps)
1761 xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
1762 num_dropped_eps,
1763 xhci->num_active_eps);
1764}
1765
Sarah Sharpc29eea62011-09-02 11:05:52 -07001766unsigned int xhci_get_block_size(struct usb_device *udev)
1767{
1768 switch (udev->speed) {
1769 case USB_SPEED_LOW:
1770 case USB_SPEED_FULL:
1771 return FS_BLOCK;
1772 case USB_SPEED_HIGH:
1773 return HS_BLOCK;
1774 case USB_SPEED_SUPER:
1775 return SS_BLOCK;
1776 case USB_SPEED_UNKNOWN:
1777 case USB_SPEED_WIRELESS:
1778 default:
1779 /* Should never happen */
1780 return 1;
1781 }
1782}
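
/*
 * A bandwidth "block" is the unit the scheduling math below works in; this
 * sketch of the model assumes the FS_BLOCK/HS_BLOCK/SS_BLOCK definitions
 * in xhci.h are 1, 4, and 16 bytes respectively, so max packet sizes and
 * ESIT payloads get rounded up into those units.
 */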
1783
1784unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
1785{
1786 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
1787 return LS_OVERHEAD;
1788 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
1789 return FS_OVERHEAD;
1790 return HS_OVERHEAD;
1791}
1792
1793/* If we are changing a LS/FS device under a HS hub,
1794 * make sure (if we are activating a new TT) that the HS bus has enough
1795 * bandwidth for this new TT.
1796 */
1797static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
1798 struct xhci_virt_device *virt_dev,
1799 int old_active_eps)
1800{
1801 struct xhci_interval_bw_table *bw_table;
1802 struct xhci_tt_bw_info *tt_info;
1803
1804 /* Find the bandwidth table for the root port this TT is attached to. */
1805 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
1806 tt_info = virt_dev->tt_info;
1807 /* If this TT already had active endpoints, the bandwidth for this TT
1808 * has already been added. Removing all periodic endpoints (and thus
1809	 * making the TT inactive) will only decrease the bandwidth used.
1810 */
1811 if (old_active_eps)
1812 return 0;
1813 if (old_active_eps == 0 && tt_info->active_eps != 0) {
1814 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
1815 return -ENOMEM;
1816 return 0;
1817 }
1818 /* Not sure why we would have no new active endpoints...
1819 *
1820 * Maybe because of an Evaluate Context change for a hub update or a
1821 * control endpoint 0 max packet size change?
1822 * FIXME: skip the bandwidth calculation in that case.
1823 */
1824 return 0;
1825}
1826
Sarah Sharp2b698992011-09-13 16:41:13 -07001827static int xhci_check_ss_bw(struct xhci_hcd *xhci,
1828 struct xhci_virt_device *virt_dev)
1829{
1830 unsigned int bw_reserved;
1831
1832 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
1833 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
1834 return -ENOMEM;
1835
1836 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
1837 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
1838 return -ENOMEM;
1839
1840 return 0;
1841}
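
/*
 * For example, assuming SS_BW_RESERVED is 10 (ten percent of the bus held
 * back for non-periodic transfers), a new periodic endpoint is rejected
 * once either direction would exceed 90 percent of SS_BW_LIMIT_IN or
 * SS_BW_LIMIT_OUT respectively.
 */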
1842
Sarah Sharpc29eea62011-09-02 11:05:52 -07001843/*
1844 * This algorithm is a very conservative estimate of the worst-case scheduling
1845 * scenario for any one interval. The hardware dynamically schedules the
1846 * packets, so we can't tell which microframe could be the limiting factor in
1847 * the bandwidth scheduling. This only takes into account periodic endpoints.
1848 *
1849 * Obviously, we can't solve an NP complete problem to find the minimum worst
1850 * case scenario. Instead, we come up with an estimate that is no less than
1851 * the worst case bandwidth used for any one microframe, but may be an
1852 * over-estimate.
1853 *
1854 * We walk the requirements for each endpoint by interval, starting with the
1855 * smallest interval, and place packets in the schedule where there is only one
1856 * possible way to schedule packets for that interval. In order to simplify
1857 * this algorithm, we record the largest max packet size for each interval, and
1858 * assume all packets will be that size.
1859 *
1860 * For interval 0, we obviously must schedule all packets in every microframe.
1861 * The bandwidth for interval 0 is just the amount of data to be transmitted
1862 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
1863 * the number of packets).
1864 *
1865 * For interval 1, we have two possible microframes to schedule those packets
1866 * in. For this algorithm, if we can schedule the same number of packets for
1867 * each possible scheduling opportunity (each microframe), we will do so. The
1868 * remaining number of packets will be saved to be transmitted in the gaps in
1869 * the next interval's scheduling sequence.
1870 *
1871 * As we move those remaining packets to be scheduled with interval 2 packets,
1872 * we have to double the number of remaining packets to transmit. This is
1873 * because the intervals are actually powers of 2, and we would be transmitting
1874 * the previous interval's packets twice in this interval. We also have to be
1875 * sure that when we look at the largest max packet size for this interval, we
1876 * also look at the largest max packet size for the remaining packets and take
1877 * the greater of the two.
1878 *
1879 * The algorithm continues to evenly distribute packets in each scheduling
1880 * opportunity, and push the remaining packets out, until we get to the last
1881 * interval. Then those packets and their associated overhead are just added
1882 * to the bandwidth used.
Sarah Sharp2e279802011-09-02 11:05:50 -07001883 */
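/*
 * Worked example of the interval walk (hypothetical packet counts, with
 * overhead and packet size folded into one "cost" unit): say interval 1
 * has 5 packets in its table and interval 2 has 6.  At i = 1 there are
 * 1 << 2 = 4 scheduling slots, so 5 >> 2 = 1 packet goes in each slot
 * (one packet's cost is added to bw_used) and 5 % 4 = 1 packet carries
 * over.  At i = 2 we owe 2 * 1 + 6 = 8 packets across 1 << 3 = 8 slots:
 * 8 >> 3 = 1 per slot, 8 % 8 = 0 remain, and packet_size/overhead reset
 * before the next interval.
 */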
1884static int xhci_check_bw_table(struct xhci_hcd *xhci,
1885 struct xhci_virt_device *virt_dev,
1886 int old_active_eps)
1887{
Sarah Sharpc29eea62011-09-02 11:05:52 -07001888 unsigned int bw_reserved;
1889 unsigned int max_bandwidth;
1890 unsigned int bw_used;
1891 unsigned int block_size;
1892 struct xhci_interval_bw_table *bw_table;
1893 unsigned int packet_size = 0;
1894 unsigned int overhead = 0;
1895 unsigned int packets_transmitted = 0;
1896 unsigned int packets_remaining = 0;
1897 unsigned int i;
1898
Sarah Sharp2b698992011-09-13 16:41:13 -07001899 if (virt_dev->udev->speed == USB_SPEED_SUPER)
1900 return xhci_check_ss_bw(xhci, virt_dev);
1901
Sarah Sharpc29eea62011-09-02 11:05:52 -07001902 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
1903 max_bandwidth = HS_BW_LIMIT;
1904 /* Convert percent of bus BW reserved to blocks reserved */
1905 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
1906 } else {
1907 max_bandwidth = FS_BW_LIMIT;
1908 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
1909 }
1910
1911 bw_table = virt_dev->bw_table;
1912 /* We need to translate the max packet size and max ESIT payloads into
1913 * the units the hardware uses.
1914 */
1915 block_size = xhci_get_block_size(virt_dev->udev);
1916
1917 /* If we are manipulating a LS/FS device under a HS hub, double check
1918	 * that the HS bus has enough bandwidth if we are activating a new TT.
1919 */
1920 if (virt_dev->tt_info) {
1921 xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
1922 virt_dev->real_port);
1923 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
1924 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
1925 "newly activated TT.\n");
1926 return -ENOMEM;
1927 }
1928 xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
1929 virt_dev->tt_info->slot_id,
1930 virt_dev->tt_info->ttport);
1931 } else {
1932 xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
1933 virt_dev->real_port);
1934 }
1935
1936 /* Add in how much bandwidth will be used for interval zero, or the
1937 * rounded max ESIT payload + number of packets * largest overhead.
1938 */
1939 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
1940 bw_table->interval_bw[0].num_packets *
1941 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
1942
1943 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
1944 unsigned int bw_added;
1945 unsigned int largest_mps;
1946 unsigned int interval_overhead;
1947
1948 /*
1949 * How many packets could we transmit in this interval?
1950 * If packets didn't fit in the previous interval, we will need
1951 * to transmit that many packets twice within this interval.
1952 */
1953 packets_remaining = 2 * packets_remaining +
1954 bw_table->interval_bw[i].num_packets;
1955
1956 /* Find the largest max packet size of this or the previous
1957 * interval.
1958 */
1959 if (list_empty(&bw_table->interval_bw[i].endpoints))
1960 largest_mps = 0;
1961 else {
1962 struct xhci_virt_ep *virt_ep;
1963 struct list_head *ep_entry;
1964
1965 ep_entry = bw_table->interval_bw[i].endpoints.next;
1966 virt_ep = list_entry(ep_entry,
1967 struct xhci_virt_ep, bw_endpoint_list);
1968 /* Convert to blocks, rounding up */
1969 largest_mps = DIV_ROUND_UP(
1970 virt_ep->bw_info.max_packet_size,
1971 block_size);
1972 }
1973 if (largest_mps > packet_size)
1974 packet_size = largest_mps;
1975
1976 /* Use the larger overhead of this or the previous interval. */
1977 interval_overhead = xhci_get_largest_overhead(
1978 &bw_table->interval_bw[i]);
1979 if (interval_overhead > overhead)
1980 overhead = interval_overhead;
1981
1982 /* How many packets can we evenly distribute across
1983 * (1 << (i + 1)) possible scheduling opportunities?
1984 */
1985 packets_transmitted = packets_remaining >> (i + 1);
1986
1987 /* Add in the bandwidth used for those scheduled packets */
1988 bw_added = packets_transmitted * (overhead + packet_size);
1989
1990 /* How many packets do we have remaining to transmit? */
1991 packets_remaining = packets_remaining % (1 << (i + 1));
1992
1993 /* What largest max packet size should those packets have? */
1994 /* If we've transmitted all packets, don't carry over the
1995 * largest packet size.
1996 */
1997 if (packets_remaining == 0) {
1998 packet_size = 0;
1999 overhead = 0;
2000 } else if (packets_transmitted > 0) {
2001 /* Otherwise if we do have remaining packets, and we've
2002 * scheduled some packets in this interval, take the
2003 * largest max packet size from endpoints with this
2004 * interval.
2005 */
2006 packet_size = largest_mps;
2007 overhead = interval_overhead;
2008 }
2009 /* Otherwise carry over packet_size and overhead from the last
2010 * time we had a remainder.
2011 */
2012 bw_used += bw_added;
2013 if (bw_used > max_bandwidth) {
2014 xhci_warn(xhci, "Not enough bandwidth. "
2015 "Proposed: %u, Max: %u\n",
2016 bw_used, max_bandwidth);
2017 return -ENOMEM;
2018 }
2019 }
2020 /*
2021 * Ok, we know we have some packets left over after even-handedly
2022 * scheduling interval 15. We don't know which microframes they will
2023 * fit into, so we over-schedule and say they will be scheduled every
2024 * microframe.
2025 */
2026 if (packets_remaining > 0)
2027 bw_used += overhead + packet_size;
2028
2029 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2030 unsigned int port_index = virt_dev->real_port - 1;
2031
2032 /* OK, we're manipulating a HS device attached to a
2033 * root port bandwidth domain. Include the number of active TTs
2034 * in the bandwidth used.
2035 */
2036 bw_used += TT_HS_OVERHEAD *
2037 xhci->rh_bw[port_index].num_active_tts;
2038 }
2039
2040 xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2041		"Available: %u percent\n",
2042 bw_used, max_bandwidth, bw_reserved,
2043 (max_bandwidth - bw_used - bw_reserved) * 100 /
2044 max_bandwidth);
2045
2046 bw_used += bw_reserved;
2047 if (bw_used > max_bandwidth) {
2048 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2049 bw_used, max_bandwidth);
2050 return -ENOMEM;
2051 }
2052
2053 bw_table->bw_used = bw_used;
Sarah Sharp2e279802011-09-02 11:05:50 -07002054 return 0;
2055}
2056
2057static bool xhci_is_async_ep(unsigned int ep_type)
2058{
2059 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2060 ep_type != ISOC_IN_EP &&
2061 ep_type != INT_IN_EP);
2062}
2063
Sarah Sharp2b698992011-09-13 16:41:13 -07002064static bool xhci_is_sync_in_ep(unsigned int ep_type)
2065{
2066	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2067}
2068
2069static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2070{
2071 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2072
2073 if (ep_bw->ep_interval == 0)
2074 return SS_OVERHEAD_BURST +
2075 (ep_bw->mult * ep_bw->num_packets *
2076 (SS_OVERHEAD + mps));
2077 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2078 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2079 1 << ep_bw->ep_interval);
2080
2081}
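
/*
 * Example with assumed values: an isochronous SS endpoint with
 * max_packet_size = 1024 (mps = 1024 / 16 = 64 blocks, taking SS_BLOCK as
 * 16 bytes), num_packets = 2, mult = 1, and ep_interval = 3 consumes
 * DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + 64 + SS_OVERHEAD_BURST), 1 << 3)
 * blocks per microframe, since the service interval spreads the cost
 * over 2^3 = 8 microframes.
 */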
2082
Sarah Sharp2e279802011-09-02 11:05:50 -07002083void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2084 struct xhci_bw_info *ep_bw,
2085 struct xhci_interval_bw_table *bw_table,
2086 struct usb_device *udev,
2087 struct xhci_virt_ep *virt_ep,
2088 struct xhci_tt_bw_info *tt_info)
2089{
2090 struct xhci_interval_bw *interval_bw;
2091 int normalized_interval;
2092
Sarah Sharp2b698992011-09-13 16:41:13 -07002093 if (xhci_is_async_ep(ep_bw->type))
Sarah Sharp2e279802011-09-02 11:05:50 -07002094 return;
2095
Sarah Sharp2b698992011-09-13 16:41:13 -07002096 if (udev->speed == USB_SPEED_SUPER) {
2097 if (xhci_is_sync_in_ep(ep_bw->type))
2098 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2099 xhci_get_ss_bw_consumed(ep_bw);
2100 else
2101 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2102 xhci_get_ss_bw_consumed(ep_bw);
2103 return;
2104 }
2105
2106 /* SuperSpeed endpoints never get added to intervals in the table, so
2107 * this check is only valid for HS/FS/LS devices.
2108 */
2109 if (list_empty(&virt_ep->bw_endpoint_list))
2110 return;
Sarah Sharp2e279802011-09-02 11:05:50 -07002111 /* For LS/FS devices, we need to translate the interval expressed in
2112 * microframes to frames.
2113 */
2114 if (udev->speed == USB_SPEED_HIGH)
2115 normalized_interval = ep_bw->ep_interval;
2116 else
2117 normalized_interval = ep_bw->ep_interval - 3;
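	/* Assuming ep_interval holds the log2 exponent from the endpoint
	 * context: an FS endpoint with ep_interval = 3 (2^3 = 8 microframes,
	 * i.e. one frame) lands in interval_bw[0].
	 */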
2118
2119 if (normalized_interval == 0)
2120 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2121 interval_bw = &bw_table->interval_bw[normalized_interval];
2122 interval_bw->num_packets -= ep_bw->num_packets;
2123 switch (udev->speed) {
2124 case USB_SPEED_LOW:
2125 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2126 break;
2127 case USB_SPEED_FULL:
2128 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2129 break;
2130 case USB_SPEED_HIGH:
2131 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2132 break;
2133 case USB_SPEED_SUPER:
2134 case USB_SPEED_UNKNOWN:
2135 case USB_SPEED_WIRELESS:
2136 /* Should never happen because only LS/FS/HS endpoints will get
2137 * added to the endpoint list.
2138 */
2139 return;
2140 }
2141 if (tt_info)
2142 tt_info->active_eps -= 1;
2143 list_del_init(&virt_ep->bw_endpoint_list);
2144}
2145
2146static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2147 struct xhci_bw_info *ep_bw,
2148 struct xhci_interval_bw_table *bw_table,
2149 struct usb_device *udev,
2150 struct xhci_virt_ep *virt_ep,
2151 struct xhci_tt_bw_info *tt_info)
2152{
2153 struct xhci_interval_bw *interval_bw;
2154 struct xhci_virt_ep *smaller_ep;
2155 int normalized_interval;
2156
2157 if (xhci_is_async_ep(ep_bw->type))
2158 return;
2159
Sarah Sharp2b698992011-09-13 16:41:13 -07002160 if (udev->speed == USB_SPEED_SUPER) {
2161 if (xhci_is_sync_in_ep(ep_bw->type))
2162 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2163 xhci_get_ss_bw_consumed(ep_bw);
2164 else
2165 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2166 xhci_get_ss_bw_consumed(ep_bw);
2167 return;
2168 }
2169
Sarah Sharp2e279802011-09-02 11:05:50 -07002170 /* For LS/FS devices, we need to translate the interval expressed in
2171 * microframes to frames.
2172 */
2173 if (udev->speed == USB_SPEED_HIGH)
2174 normalized_interval = ep_bw->ep_interval;
2175 else
2176 normalized_interval = ep_bw->ep_interval - 3;
2177
2178 if (normalized_interval == 0)
2179 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2180 interval_bw = &bw_table->interval_bw[normalized_interval];
2181 interval_bw->num_packets += ep_bw->num_packets;
2182 switch (udev->speed) {
2183 case USB_SPEED_LOW:
2184 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2185 break;
2186 case USB_SPEED_FULL:
2187 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2188 break;
2189 case USB_SPEED_HIGH:
2190 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2191 break;
2192 case USB_SPEED_SUPER:
2193 case USB_SPEED_UNKNOWN:
2194 case USB_SPEED_WIRELESS:
2195 /* Should never happen because only LS/FS/HS endpoints will get
2196 * added to the endpoint list.
2197 */
2198 return;
2199 }
2200
2201 if (tt_info)
2202 tt_info->active_eps += 1;
2203 /* Insert the endpoint into the list, largest max packet size first. */
2204 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2205 bw_endpoint_list) {
2206 if (ep_bw->max_packet_size >=
2207 smaller_ep->bw_info.max_packet_size) {
2208 /* Add the new ep before the smaller endpoint */
2209 list_add_tail(&virt_ep->bw_endpoint_list,
2210 &smaller_ep->bw_endpoint_list);
2211 return;
2212 }
2213 }
2214 /* Add the new endpoint at the end of the list. */
2215 list_add_tail(&virt_ep->bw_endpoint_list,
2216 &interval_bw->endpoints);
2217}
2218
2219void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2220 struct xhci_virt_device *virt_dev,
2221 int old_active_eps)
2222{
2223 struct xhci_root_port_bw_info *rh_bw_info;
2224 if (!virt_dev->tt_info)
2225 return;
2226
2227 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2228 if (old_active_eps == 0 &&
2229 virt_dev->tt_info->active_eps != 0) {
2230 rh_bw_info->num_active_tts += 1;
Sarah Sharpc29eea62011-09-02 11:05:52 -07002231 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
Sarah Sharp2e279802011-09-02 11:05:50 -07002232 } else if (old_active_eps != 0 &&
2233 virt_dev->tt_info->active_eps == 0) {
2234 rh_bw_info->num_active_tts -= 1;
Sarah Sharpc29eea62011-09-02 11:05:52 -07002235 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
Sarah Sharp2e279802011-09-02 11:05:50 -07002236 }
2237}
2238
2239static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2240 struct xhci_virt_device *virt_dev,
2241 struct xhci_container_ctx *in_ctx)
2242{
2243 struct xhci_bw_info ep_bw_info[31];
2244 int i;
2245 struct xhci_input_control_ctx *ctrl_ctx;
2246 int old_active_eps = 0;
2247
Sarah Sharp2e279802011-09-02 11:05:50 -07002248 if (virt_dev->tt_info)
2249 old_active_eps = virt_dev->tt_info->active_eps;
2250
2251 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2252
2253 for (i = 0; i < 31; i++) {
2254 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2255 continue;
2256
2257 /* Make a copy of the BW info in case we need to revert this */
2258 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2259 sizeof(ep_bw_info[i]));
2260 /* Drop the endpoint from the interval table if the endpoint is
2261 * being dropped or changed.
2262 */
2263 if (EP_IS_DROPPED(ctrl_ctx, i))
2264 xhci_drop_ep_from_interval_table(xhci,
2265 &virt_dev->eps[i].bw_info,
2266 virt_dev->bw_table,
2267 virt_dev->udev,
2268 &virt_dev->eps[i],
2269 virt_dev->tt_info);
2270 }
2271 /* Overwrite the information stored in the endpoints' bw_info */
2272 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2273 for (i = 0; i < 31; i++) {
2274 /* Add any changed or added endpoints to the interval table */
2275 if (EP_IS_ADDED(ctrl_ctx, i))
2276 xhci_add_ep_to_interval_table(xhci,
2277 &virt_dev->eps[i].bw_info,
2278 virt_dev->bw_table,
2279 virt_dev->udev,
2280 &virt_dev->eps[i],
2281 virt_dev->tt_info);
2282 }
2283
2284 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2285 /* Ok, this fits in the bandwidth we have.
2286 * Update the number of active TTs.
2287 */
2288 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2289 return 0;
2290 }
2291
2292 /* We don't have enough bandwidth for this, revert the stored info. */
2293 for (i = 0; i < 31; i++) {
2294 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2295 continue;
2296
2297 /* Drop the new copies of any added or changed endpoints from
2298 * the interval table.
2299 */
2300 if (EP_IS_ADDED(ctrl_ctx, i)) {
2301 xhci_drop_ep_from_interval_table(xhci,
2302 &virt_dev->eps[i].bw_info,
2303 virt_dev->bw_table,
2304 virt_dev->udev,
2305 &virt_dev->eps[i],
2306 virt_dev->tt_info);
2307 }
2308 /* Revert the endpoint back to its old information */
2309 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2310 sizeof(ep_bw_info[i]));
2311 /* Add any changed or dropped endpoints back into the table */
2312 if (EP_IS_DROPPED(ctrl_ctx, i))
2313 xhci_add_ep_to_interval_table(xhci,
2314 &virt_dev->eps[i].bw_info,
2315 virt_dev->bw_table,
2316 virt_dev->udev,
2317 &virt_dev->eps[i],
2318 virt_dev->tt_info);
2319 }
2320 return -ENOMEM;
2321}
2322
2323
Sarah Sharpf2217e82009-08-07 14:04:43 -07002324/* Issue a configure endpoint command or evaluate context command
2325 * and wait for it to finish.
2326 */
2327static int xhci_configure_endpoint(struct xhci_hcd *xhci,
Sarah Sharp913a8a32009-09-04 10:53:13 -07002328 struct usb_device *udev,
2329 struct xhci_command *command,
2330 bool ctx_change, bool must_succeed)
Sarah Sharpf2217e82009-08-07 14:04:43 -07002331{
2332 int ret;
2333 int timeleft;
2334 unsigned long flags;
Sarah Sharp913a8a32009-09-04 10:53:13 -07002335 struct xhci_container_ctx *in_ctx;
2336 struct completion *cmd_completion;
Matt Evans28ccd292011-03-29 13:40:46 +11002337 u32 *cmd_status;
Sarah Sharp913a8a32009-09-04 10:53:13 -07002338 struct xhci_virt_device *virt_dev;
Sarah Sharpf2217e82009-08-07 14:04:43 -07002339
2340 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharp913a8a32009-09-04 10:53:13 -07002341 virt_dev = xhci->devs[udev->slot_id];
Sarah Sharp2cf95c12011-05-11 16:14:58 -07002342
Sarah Sharp750645f2011-09-02 11:05:43 -07002343 if (command)
2344 in_ctx = command->in_ctx;
2345 else
2346 in_ctx = virt_dev->in_ctx;
2347
2348 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2349 xhci_reserve_host_resources(xhci, in_ctx)) {
2350 spin_unlock_irqrestore(&xhci->lock, flags);
2351 xhci_warn(xhci, "Not enough host resources, "
2352 "active endpoint contexts = %u\n",
2353 xhci->num_active_eps);
2354 return -ENOMEM;
2355 }
Sarah Sharp2e279802011-09-02 11:05:50 -07002356 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2357 xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
2358 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2359 xhci_free_host_resources(xhci, in_ctx);
2360 spin_unlock_irqrestore(&xhci->lock, flags);
2361 xhci_warn(xhci, "Not enough bandwidth\n");
2362 return -ENOMEM;
2363 }
Sarah Sharp750645f2011-09-02 11:05:43 -07002364
2365 if (command) {
Sarah Sharp913a8a32009-09-04 10:53:13 -07002366 cmd_completion = command->completion;
2367 cmd_status = &command->status;
2368 command->command_trb = xhci->cmd_ring->enqueue;
Paul Zimmerman7a3783e2010-11-17 16:26:50 -08002369
2370 /* Enqueue pointer can be left pointing to the link TRB,
2371 * we must handle that
2372 */
Matt Evansf5960b62011-06-01 10:22:55 +10002373 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
Paul Zimmerman7a3783e2010-11-17 16:26:50 -08002374 command->command_trb =
2375 xhci->cmd_ring->enq_seg->next->trbs;
2376
Sarah Sharp913a8a32009-09-04 10:53:13 -07002377 list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
2378 } else {
Sarah Sharp913a8a32009-09-04 10:53:13 -07002379 cmd_completion = &virt_dev->cmd_completion;
2380 cmd_status = &virt_dev->cmd_status;
2381 }
Andiry Xu1d680642010-03-12 17:10:04 +08002382 init_completion(cmd_completion);
Sarah Sharp913a8a32009-09-04 10:53:13 -07002383
Sarah Sharpf2217e82009-08-07 14:04:43 -07002384 if (!ctx_change)
Sarah Sharp913a8a32009-09-04 10:53:13 -07002385 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2386 udev->slot_id, must_succeed);
Sarah Sharpf2217e82009-08-07 14:04:43 -07002387 else
Sarah Sharp913a8a32009-09-04 10:53:13 -07002388 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
Sarah Sharpf2217e82009-08-07 14:04:43 -07002389 udev->slot_id);
2390 if (ret < 0) {
Sarah Sharpc01591b2009-12-09 15:58:58 -08002391 if (command)
2392 list_del(&command->cmd_list);
Sarah Sharp2cf95c12011-05-11 16:14:58 -07002393 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2394 xhci_free_host_resources(xhci, in_ctx);
Sarah Sharpf2217e82009-08-07 14:04:43 -07002395 spin_unlock_irqrestore(&xhci->lock, flags);
2396 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
2397 return -ENOMEM;
2398 }
2399 xhci_ring_cmd_db(xhci);
2400 spin_unlock_irqrestore(&xhci->lock, flags);
2401
2402 /* Wait for the configure endpoint command to complete */
2403 timeleft = wait_for_completion_interruptible_timeout(
Sarah Sharp913a8a32009-09-04 10:53:13 -07002404 cmd_completion,
Sarah Sharpf2217e82009-08-07 14:04:43 -07002405 USB_CTRL_SET_TIMEOUT);
2406 if (timeleft <= 0) {
2407 xhci_warn(xhci, "%s while waiting for %s command\n",
2408 timeleft == 0 ? "Timeout" : "Signal",
2409 ctx_change == 0 ?
2410 "configure endpoint" :
2411 "evaluate context");
2412 /* FIXME cancel the configure endpoint command */
2413 return -ETIME;
2414 }
2415
2416 if (!ctx_change)
Sarah Sharp2cf95c12011-05-11 16:14:58 -07002417 ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
2418 else
2419 ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
2420
2421 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2422 spin_lock_irqsave(&xhci->lock, flags);
2423 /* If the command failed, remove the reserved resources.
2424 * Otherwise, clean up the estimate to include dropped eps.
2425 */
2426 if (ret)
2427 xhci_free_host_resources(xhci, in_ctx);
2428 else
2429 xhci_finish_resource_reservation(xhci, in_ctx);
2430 spin_unlock_irqrestore(&xhci->lock, flags);
2431 }
2432 return ret;
Sarah Sharpf2217e82009-08-07 14:04:43 -07002433}
2434
Sarah Sharpf88ba782009-05-14 11:44:22 -07002435/* Called after one or more calls to xhci_add_endpoint() or
2436 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2437 * to call xhci_reset_bandwidth().
2438 *
2439 * Since we are in the middle of changing either configuration or
2440 * installing a new alt setting, the USB core won't allow URBs to be
2441 * enqueued for any endpoint on the old config or interface. Nothing
2442 * else should be touching the xhci->devs[slot_id] structure, so we
2443 * don't need to take the xhci->lock for manipulating that.
2444 */
Sarah Sharpf94e01862009-04-27 19:58:38 -07002445int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2446{
2447 int i;
2448 int ret = 0;
Sarah Sharpf94e01862009-04-27 19:58:38 -07002449 struct xhci_hcd *xhci;
2450 struct xhci_virt_device *virt_dev;
John Yound115b042009-07-27 12:05:15 -07002451 struct xhci_input_control_ctx *ctrl_ctx;
2452 struct xhci_slot_ctx *slot_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07002453
Andiry Xu64927732010-10-14 07:22:45 -07002454 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002455 if (ret <= 0)
2456 return ret;
2457 xhci = hcd_to_xhci(hcd);
Sarah Sharpfe6c6c12011-05-23 16:41:17 -07002458 if (xhci->xhc_state & XHCI_STATE_DYING)
2459 return -ENODEV;
Sarah Sharpf94e01862009-04-27 19:58:38 -07002460
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002461 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002462 virt_dev = xhci->devs[udev->slot_id];
2463
2464 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
John Yound115b042009-07-27 12:05:15 -07002465 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11002466 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2467 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2468 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
Sarah Sharp2dc37532011-09-02 11:05:40 -07002469
2470	/* Don't issue the command if there are no endpoints to update. */
2471 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2472 ctrl_ctx->drop_flags == 0)
2473 return 0;
2474
Sarah Sharpf94e01862009-04-27 19:58:38 -07002475 xhci_dbg(xhci, "New Input Control Context:\n");
John Yound115b042009-07-27 12:05:15 -07002476 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2477 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
Matt Evans28ccd292011-03-29 13:40:46 +11002478 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
Sarah Sharpf94e01862009-04-27 19:58:38 -07002479
Sarah Sharp913a8a32009-09-04 10:53:13 -07002480 ret = xhci_configure_endpoint(xhci, udev, NULL,
2481 false, false);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002482 if (ret) {
2483 /* Callee should call reset_bandwidth() */
Sarah Sharpf94e01862009-04-27 19:58:38 -07002484 return ret;
2485 }
2486
2487 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
John Yound115b042009-07-27 12:05:15 -07002488 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
Matt Evans28ccd292011-03-29 13:40:46 +11002489 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
Sarah Sharpf94e01862009-04-27 19:58:38 -07002490
Sarah Sharp834cb0f2011-05-12 18:06:37 -07002491 /* Free any rings that were dropped, but not changed. */
2492 for (i = 1; i < 31; ++i) {
Matt Evans4819fef2011-06-01 13:01:07 +10002493 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2494 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
Sarah Sharp834cb0f2011-05-12 18:06:37 -07002495 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2496 }
John Yound115b042009-07-27 12:05:15 -07002497 xhci_zero_in_ctx(xhci, virt_dev);
Sarah Sharp834cb0f2011-05-12 18:06:37 -07002498 /*
2499 * Install any rings for completely new endpoints or changed endpoints,
2500 * and free or cache any old rings from changed endpoints.
2501 */
Sarah Sharpf94e01862009-04-27 19:58:38 -07002502 for (i = 1; i < 31; ++i) {
Sarah Sharp74f9fe22009-12-03 09:44:29 -08002503 if (!virt_dev->eps[i].new_ring)
2504 continue;
2505 /* Only cache or free the old ring if it exists.
2506 * It may not if this is the first add of an endpoint.
2507 */
2508 if (virt_dev->eps[i].ring) {
Sarah Sharp412566b2009-12-09 15:59:01 -08002509 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002510 }
Sarah Sharp74f9fe22009-12-03 09:44:29 -08002511 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2512 virt_dev->eps[i].new_ring = NULL;
Sarah Sharpf94e01862009-04-27 19:58:38 -07002513 }
2514
Sarah Sharpf94e01862009-04-27 19:58:38 -07002515 return ret;
2516}
2517
2518void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2519{
Sarah Sharpf94e01862009-04-27 19:58:38 -07002520 struct xhci_hcd *xhci;
2521 struct xhci_virt_device *virt_dev;
2522 int i, ret;
2523
Andiry Xu64927732010-10-14 07:22:45 -07002524 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002525 if (ret <= 0)
2526 return;
2527 xhci = hcd_to_xhci(hcd);
2528
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002529 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002530 virt_dev = xhci->devs[udev->slot_id];
2531 /* Free any rings allocated for added endpoints */
2532 for (i = 0; i < 31; ++i) {
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002533 if (virt_dev->eps[i].new_ring) {
2534 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2535 virt_dev->eps[i].new_ring = NULL;
Sarah Sharpf94e01862009-04-27 19:58:38 -07002536 }
2537 }
John Yound115b042009-07-27 12:05:15 -07002538 xhci_zero_in_ctx(xhci, virt_dev);
Sarah Sharpf94e01862009-04-27 19:58:38 -07002539}
2540
Sarah Sharp5270b952009-09-04 10:53:11 -07002541static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
Sarah Sharp913a8a32009-09-04 10:53:13 -07002542 struct xhci_container_ctx *in_ctx,
2543 struct xhci_container_ctx *out_ctx,
2544 u32 add_flags, u32 drop_flags)
Sarah Sharp5270b952009-09-04 10:53:11 -07002545{
2546 struct xhci_input_control_ctx *ctrl_ctx;
Sarah Sharp913a8a32009-09-04 10:53:13 -07002547 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11002548 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2549 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
Sarah Sharp913a8a32009-09-04 10:53:13 -07002550 xhci_slot_copy(xhci, in_ctx, out_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11002551 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
Sarah Sharp5270b952009-09-04 10:53:11 -07002552
Sarah Sharp913a8a32009-09-04 10:53:13 -07002553 xhci_dbg(xhci, "Input Context:\n");
2554 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
Sarah Sharp5270b952009-09-04 10:53:11 -07002555}
2556
Dmitry Torokhov8212a492011-02-08 13:55:59 -08002557static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002558 unsigned int slot_id, unsigned int ep_index,
2559 struct xhci_dequeue_state *deq_state)
2560{
2561 struct xhci_container_ctx *in_ctx;
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002562 struct xhci_ep_ctx *ep_ctx;
2563 u32 added_ctxs;
2564 dma_addr_t addr;
2565
Sarah Sharp913a8a32009-09-04 10:53:13 -07002566 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2567 xhci->devs[slot_id]->out_ctx, ep_index);
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002568 in_ctx = xhci->devs[slot_id]->in_ctx;
2569 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2570 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2571 deq_state->new_deq_ptr);
2572 if (addr == 0) {
2573 xhci_warn(xhci, "WARN Cannot submit config ep after "
2574 "reset ep command\n");
2575 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2576 deq_state->new_deq_seg,
2577 deq_state->new_deq_ptr);
2578 return;
2579 }
Matt Evans28ccd292011-03-29 13:40:46 +11002580 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002581
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002582 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
Sarah Sharp913a8a32009-09-04 10:53:13 -07002583 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2584 xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002585}
2586
Sarah Sharp82d10092009-08-07 14:04:52 -07002587void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002588 struct usb_device *udev, unsigned int ep_index)
Sarah Sharp82d10092009-08-07 14:04:52 -07002589{
2590 struct xhci_dequeue_state deq_state;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002591 struct xhci_virt_ep *ep;
Sarah Sharp82d10092009-08-07 14:04:52 -07002592
2593 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002594 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
Sarah Sharp82d10092009-08-07 14:04:52 -07002595 /* We need to move the HW's dequeue pointer past this TD,
2596 * or it will attempt to resend it on the next doorbell ring.
2597 */
2598 xhci_find_new_dequeue_state(xhci, udev->slot_id,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002599 ep_index, ep->stopped_stream, ep->stopped_td,
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002600 &deq_state);
Sarah Sharp82d10092009-08-07 14:04:52 -07002601
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002602 /* HW with the reset endpoint quirk will use the saved dequeue state to
2603 * issue a configure endpoint command later.
2604 */
2605 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2606 xhci_dbg(xhci, "Queueing new dequeue state\n");
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002607 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002608 ep_index, ep->stopped_stream, &deq_state);
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002609 } else {
2610 /* Better hope no one uses the input context between now and the
2611 * reset endpoint completion!
Sarah Sharpe9df17e2010-04-02 15:34:43 -07002612 * XXX: No idea how this hardware will react when stream rings
2613 * are enabled.
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07002614 */
2615 xhci_dbg(xhci, "Setting up input context for "
2616 "configure endpoint command\n");
2617 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2618 ep_index, &deq_state);
2619 }
Sarah Sharp82d10092009-08-07 14:04:52 -07002620}
2621
Sarah Sharpa1587d92009-07-27 12:03:15 -07002622/* Deal with stalled endpoints. The core should have sent the control message
2623 * to clear the halt condition. However, we need to make the xHCI hardware
2624 * reset its sequence number, since a device will expect a sequence number of
2625 * zero after the halt condition is cleared.
2626 * Context: in_interrupt
2627 */
2628void xhci_endpoint_reset(struct usb_hcd *hcd,
2629 struct usb_host_endpoint *ep)
2630{
2631 struct xhci_hcd *xhci;
2632 struct usb_device *udev;
2633 unsigned int ep_index;
2634 unsigned long flags;
2635 int ret;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002636 struct xhci_virt_ep *virt_ep;
Sarah Sharpa1587d92009-07-27 12:03:15 -07002637
2638 xhci = hcd_to_xhci(hcd);
2639 udev = (struct usb_device *) ep->hcpriv;
2640 /* Called with a root hub endpoint (or an endpoint that wasn't added
2641	 * with xhci_add_endpoint()).
2642 */
2643 if (!ep->hcpriv)
2644 return;
2645 ep_index = xhci_get_endpoint_index(&ep->desc);
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002646 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2647 if (!virt_ep->stopped_td) {
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07002648 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
2649 ep->desc.bEndpointAddress);
2650 return;
2651 }
Sarah Sharp82d10092009-08-07 14:04:52 -07002652 if (usb_endpoint_xfer_control(&ep->desc)) {
2653 xhci_dbg(xhci, "Control endpoint stall already handled.\n");
2654 return;
2655 }
Sarah Sharpa1587d92009-07-27 12:03:15 -07002656
2657 xhci_dbg(xhci, "Queueing reset endpoint command\n");
2658 spin_lock_irqsave(&xhci->lock, flags);
2659 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07002660 /*
2661 * Can't change the ring dequeue pointer until it's transitioned to the
2662	 * stopped state, which happens only upon a successful reset endpoint
2663 * command. Better hope that last command worked!
2664 */
Sarah Sharpa1587d92009-07-27 12:03:15 -07002665 if (!ret) {
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07002666 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
2667 kfree(virt_ep->stopped_td);
Sarah Sharpa1587d92009-07-27 12:03:15 -07002668 xhci_ring_cmd_db(xhci);
2669 }
Sarah Sharp1624ae12010-05-06 13:40:08 -07002670 virt_ep->stopped_td = NULL;
2671 virt_ep->stopped_trb = NULL;
Sarah Sharp5e5cf6f2010-05-06 13:40:18 -07002672 virt_ep->stopped_stream = 0;
Sarah Sharpa1587d92009-07-27 12:03:15 -07002673 spin_unlock_irqrestore(&xhci->lock, flags);
2674
2675 if (ret)
2676 xhci_warn(xhci, "FIXME allocate a new ring segment\n");
2677}
2678
Sarah Sharp8df75f42010-04-02 15:34:16 -07002679static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2680 struct usb_device *udev, struct usb_host_endpoint *ep,
2681 unsigned int slot_id)
2682{
2683 int ret;
2684 unsigned int ep_index;
2685 unsigned int ep_state;
2686
2687 if (!ep)
2688 return -EINVAL;
Andiry Xu64927732010-10-14 07:22:45 -07002689 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
Sarah Sharp8df75f42010-04-02 15:34:16 -07002690 if (ret <= 0)
2691 return -EINVAL;
Alan Stern842f1692010-04-30 12:44:46 -04002692 if (ep->ss_ep_comp.bmAttributes == 0) {
Sarah Sharp8df75f42010-04-02 15:34:16 -07002693 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2694 " descriptor for ep 0x%x does not support streams\n",
2695 ep->desc.bEndpointAddress);
2696 return -EINVAL;
2697 }
2698
2699 ep_index = xhci_get_endpoint_index(&ep->desc);
2700 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2701 if (ep_state & EP_HAS_STREAMS ||
2702 ep_state & EP_GETTING_STREAMS) {
2703 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
2704 "already has streams set up.\n",
2705 ep->desc.bEndpointAddress);
2706 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
2707 "dynamic stream context array reallocation.\n");
2708 return -EINVAL;
2709 }
2710 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
2711 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
2712 "endpoint 0x%x; URBs are pending.\n",
2713 ep->desc.bEndpointAddress);
2714 return -EINVAL;
2715 }
2716 return 0;
2717}
2718
2719static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
2720 unsigned int *num_streams, unsigned int *num_stream_ctxs)
2721{
2722 unsigned int max_streams;
2723
2724 /* The stream context array size must be a power of two */
2725 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
2726 /*
2727 * Find out how many primary stream array entries the host controller
2728 * supports. Later we may use secondary stream arrays (similar to 2nd
2729 * level page entries), but that's an optional feature for xHCI host
2730 * controllers. xHCs must support at least 4 stream IDs.
2731 */
2732 max_streams = HCC_MAX_PSA(xhci->hcc_params);
2733 if (*num_stream_ctxs > max_streams) {
2734 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
2735 max_streams);
2736 *num_stream_ctxs = max_streams;
2737 *num_streams = max_streams;
2738 }
2739}
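
/*
 * Illustrative sketch, not part of the driver: how the sizing above plays
 * out for a hypothetical request. The caller below and its numbers are
 * made up for the example.
 */
#if 0
static void example_stream_sizing(struct xhci_hcd *xhci)
{
	unsigned int num_streams = 6;	/* 5 requested by a driver + stream 0 */
	unsigned int num_stream_ctxs;

	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	/*
	 * roundup_pow_of_two(6) == 8, so on a host whose HCC_MAX_PSA is at
	 * least 8 this leaves num_stream_ctxs == 8 and num_streams == 6; a
	 * host reporting only 4 would clamp both values to 4.
	 */
}
#endif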
2740
2741/* Returns an error code if one of the endpoints already has streams.
2742 * This does not change any data structures, it only checks and gathers
2743 * information.
2744 */
2745static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
2746 struct usb_device *udev,
2747 struct usb_host_endpoint **eps, unsigned int num_eps,
2748 unsigned int *num_streams, u32 *changed_ep_bitmask)
2749{
Sarah Sharp8df75f42010-04-02 15:34:16 -07002750 unsigned int max_streams;
2751 unsigned int endpoint_flag;
2752 int i;
2753 int ret;
2754
2755 for (i = 0; i < num_eps; i++) {
2756 ret = xhci_check_streams_endpoint(xhci, udev,
2757 eps[i], udev->slot_id);
2758 if (ret < 0)
2759 return ret;
2760
Alan Stern842f1692010-04-30 12:44:46 -04002761 max_streams = USB_SS_MAX_STREAMS(
2762 eps[i]->ss_ep_comp.bmAttributes);
Sarah Sharp8df75f42010-04-02 15:34:16 -07002763 if (max_streams < (*num_streams - 1)) {
2764 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
2765 eps[i]->desc.bEndpointAddress,
2766 max_streams);
2767 *num_streams = max_streams+1;
2768 }
2769
2770 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
2771 if (*changed_ep_bitmask & endpoint_flag)
2772 return -EINVAL;
2773 *changed_ep_bitmask |= endpoint_flag;
2774 }
2775 return 0;
2776}
2777
2778static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
2779 struct usb_device *udev,
2780 struct usb_host_endpoint **eps, unsigned int num_eps)
2781{
2782 u32 changed_ep_bitmask = 0;
2783 unsigned int slot_id;
2784 unsigned int ep_index;
2785 unsigned int ep_state;
2786 int i;
2787
2788 slot_id = udev->slot_id;
2789 if (!xhci->devs[slot_id])
2790 return 0;
2791
2792 for (i = 0; i < num_eps; i++) {
2793 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2794 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2795 /* Are streams already being freed for the endpoint? */
2796 if (ep_state & EP_GETTING_NO_STREAMS) {
2797 xhci_warn(xhci, "WARN Can't disable streams for "
2798 "endpoint 0x%x\n, "
2799 "streams are being disabled already.",
2800 eps[i]->desc.bEndpointAddress);
2801 return 0;
2802 }
2803 /* Are there actually any streams to free? */
2804 if (!(ep_state & EP_HAS_STREAMS) &&
2805 !(ep_state & EP_GETTING_STREAMS)) {
2806 xhci_warn(xhci, "WARN Can't disable streams for "
2807 "endpoint 0x%x\n, "
2808 "streams are already disabled!",
2809 eps[i]->desc.bEndpointAddress);
2810 xhci_warn(xhci, "WARN xhci_free_streams() called "
2811 "with non-streams endpoint\n");
2812 return 0;
2813 }
2814 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
2815 }
2816 return changed_ep_bitmask;
2817}
2818
2819/*
2820 * The USB device drivers use this function (through the HCD interface in USB
2821 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
2822 * coordinate mass storage command queueing across multiple endpoints (basically
2823 * a stream ID == a task ID).
2824 *
2825 * Setting up streams involves allocating the same size stream context array
2826 * for each endpoint and issuing a configure endpoint command for all endpoints.
2827 *
2828 * Don't allow the call to succeed if one endpoint only supports one stream
2829 * (which means it doesn't support streams at all).
2830 *
2831 * Drivers may get less stream IDs than they asked for, if the host controller
2832 * hardware or endpoints claim they can't support the number of requested
2833 * stream IDs.
2834 */
2835int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
2836 struct usb_host_endpoint **eps, unsigned int num_eps,
2837 unsigned int num_streams, gfp_t mem_flags)
2838{
2839 int i, ret;
2840 struct xhci_hcd *xhci;
2841 struct xhci_virt_device *vdev;
2842 struct xhci_command *config_cmd;
2843 unsigned int ep_index;
2844 unsigned int num_stream_ctxs;
2845 unsigned long flags;
2846 u32 changed_ep_bitmask = 0;
2847
2848 if (!eps)
2849 return -EINVAL;
2850
2851 /* Add one to the number of streams requested to account for
2852	 * stream 0, which is reserved for xHCI usage.
2853 */
2854 num_streams += 1;
2855 xhci = hcd_to_xhci(hcd);
2856 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
2857 num_streams);
2858
2859 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
2860 if (!config_cmd) {
2861 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
2862 return -ENOMEM;
2863 }
2864
2865	/* Check to make sure none of the endpoints are already configured for
2866 * streams. While we're at it, find the maximum number of streams that
2867 * all the endpoints will support and check for duplicate endpoints.
2868 */
2869 spin_lock_irqsave(&xhci->lock, flags);
2870 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
2871 num_eps, &num_streams, &changed_ep_bitmask);
2872 if (ret < 0) {
2873 xhci_free_command(xhci, config_cmd);
2874 spin_unlock_irqrestore(&xhci->lock, flags);
2875 return ret;
2876 }
2877 if (num_streams <= 1) {
2878 xhci_warn(xhci, "WARN: endpoints can't handle "
2879 "more than one stream.\n");
2880 xhci_free_command(xhci, config_cmd);
2881 spin_unlock_irqrestore(&xhci->lock, flags);
2882 return -EINVAL;
2883 }
2884 vdev = xhci->devs[udev->slot_id];
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002885 /* Mark each endpoint as being in transition, so
Sarah Sharp8df75f42010-04-02 15:34:16 -07002886 * xhci_urb_enqueue() will reject all URBs.
2887 */
2888 for (i = 0; i < num_eps; i++) {
2889 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2890 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
2891 }
2892 spin_unlock_irqrestore(&xhci->lock, flags);
2893
2894 /* Setup internal data structures and allocate HW data structures for
2895 * streams (but don't install the HW structures in the input context
2896 * until we're sure all memory allocation succeeded).
2897 */
2898 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
2899 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
2900 num_stream_ctxs, num_streams);
2901
2902 for (i = 0; i < num_eps; i++) {
2903 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2904 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
2905 num_stream_ctxs,
2906 num_streams, mem_flags);
2907 if (!vdev->eps[ep_index].stream_info)
2908 goto cleanup;
2909 /* Set maxPstreams in endpoint context and update deq ptr to
2910 * point to stream context array. FIXME
2911 */
2912 }
2913
2914 /* Set up the input context for a configure endpoint command. */
2915 for (i = 0; i < num_eps; i++) {
2916 struct xhci_ep_ctx *ep_ctx;
2917
2918 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2919 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
2920
2921 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
2922 vdev->out_ctx, ep_index);
2923 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
2924 vdev->eps[ep_index].stream_info);
2925 }
2926 /* Tell the HW to drop its old copy of the endpoint context info
2927 * and add the updated copy from the input context.
2928 */
2929 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
2930 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
2931
2932 /* Issue and wait for the configure endpoint command */
2933 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
2934 false, false);
2935
2936 /* xHC rejected the configure endpoint command for some reason, so we
2937 * leave the old ring intact and free our internal streams data
2938 * structure.
2939 */
2940 if (ret < 0)
2941 goto cleanup;
2942
2943 spin_lock_irqsave(&xhci->lock, flags);
2944 for (i = 0; i < num_eps; i++) {
2945 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2946 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
2947 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
2948 udev->slot_id, ep_index);
2949 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
2950 }
2951 xhci_free_command(xhci, config_cmd);
2952 spin_unlock_irqrestore(&xhci->lock, flags);
2953
2954 /* Subtract 1 for stream 0, which drivers can't use */
2955 return num_streams - 1;
2956
2957cleanup:
2958 /* If it didn't work, free the streams! */
2959 for (i = 0; i < num_eps; i++) {
2960 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2961 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
Sarah Sharp8a007742010-04-30 15:37:56 -07002962 vdev->eps[ep_index].stream_info = NULL;
Sarah Sharp8df75f42010-04-02 15:34:16 -07002963 /* FIXME Unset maxPstreams in endpoint context and
2964		 * update deq ptr to point to the normal endpoint ring.
2965 */
2966 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
2967 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
2968 xhci_endpoint_zero(xhci, vdev, eps[i]);
2969 }
2970 xhci_free_command(xhci, config_cmd);
2971 return -ENOMEM;
2972}
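
/*
 * Illustrative sketch, not part of the driver: how a class driver might
 * reach xhci_alloc_streams() above through the USB core. The interface and
 * endpoint pointers are hypothetical placeholders; usb_alloc_streams() and
 * usb_free_streams() are the core wrappers that end up in this file's
 * alloc/free hooks.
 */
#if 0
static int example_use_streams(struct usb_interface *intf,
		struct usb_host_endpoint *bulk_in,
		struct usb_host_endpoint *bulk_out)
{
	struct usb_host_endpoint *eps[] = { bulk_in, bulk_out };
	int streams;

	/* Ask for 16 stream IDs; the xHC or the endpoints may grant fewer. */
	streams = usb_alloc_streams(intf, eps, ARRAY_SIZE(eps), 16, GFP_NOIO);
	if (streams <= 0)
		return streams;	/* no streams support, or setup failed */

	/* ... submit URBs with urb->stream_id set to 1..streams ... */

	usb_free_streams(intf, eps, ARRAY_SIZE(eps), GFP_NOIO);
	return 0;
}
#endif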
2973
2974/* Transition the endpoint from using streams to being a "normal" endpoint
2975 * without streams.
2976 *
2977 * Modify the endpoint context state, submit a configure endpoint command,
2978 * and free all endpoint rings for streams if that completes successfully.
2979 */
2980int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
2981 struct usb_host_endpoint **eps, unsigned int num_eps,
2982 gfp_t mem_flags)
2983{
2984 int i, ret;
2985 struct xhci_hcd *xhci;
2986 struct xhci_virt_device *vdev;
2987 struct xhci_command *command;
2988 unsigned int ep_index;
2989 unsigned long flags;
2990 u32 changed_ep_bitmask;
2991
2992 xhci = hcd_to_xhci(hcd);
2993 vdev = xhci->devs[udev->slot_id];
2994
2995 /* Set up a configure endpoint command to remove the streams rings */
2996 spin_lock_irqsave(&xhci->lock, flags);
2997 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
2998 udev, eps, num_eps);
2999 if (changed_ep_bitmask == 0) {
3000 spin_unlock_irqrestore(&xhci->lock, flags);
3001 return -EINVAL;
3002 }
3003
3004 /* Use the xhci_command structure from the first endpoint. We may have
3005 * allocated too many, but the driver may call xhci_free_streams() for
3006 * each endpoint it grouped into one call to xhci_alloc_streams().
3007 */
3008 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3009 command = vdev->eps[ep_index].stream_info->free_streams_command;
3010 for (i = 0; i < num_eps; i++) {
3011 struct xhci_ep_ctx *ep_ctx;
3012
3013 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3014 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3015 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3016 EP_GETTING_NO_STREAMS;
3017
3018 xhci_endpoint_copy(xhci, command->in_ctx,
3019 vdev->out_ctx, ep_index);
3020 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
3021 &vdev->eps[ep_index]);
3022 }
3023 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3024 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3025 spin_unlock_irqrestore(&xhci->lock, flags);
3026
3027 /* Issue and wait for the configure endpoint command,
3028 * which must succeed.
3029 */
3030 ret = xhci_configure_endpoint(xhci, udev, command,
3031 false, true);
3032
3033 /* xHC rejected the configure endpoint command for some reason, so we
3034 * leave the streams rings intact.
3035 */
3036 if (ret < 0)
3037 return ret;
3038
3039 spin_lock_irqsave(&xhci->lock, flags);
3040 for (i = 0; i < num_eps; i++) {
3041 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3042 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
Sarah Sharp8a007742010-04-30 15:37:56 -07003043 vdev->eps[ep_index].stream_info = NULL;
Sarah Sharp8df75f42010-04-02 15:34:16 -07003044 /* FIXME Unset maxPstreams in endpoint context and
3045		 * update deq ptr to point to the normal endpoint ring.
3046 */
3047 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3048 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3049 }
3050 spin_unlock_irqrestore(&xhci->lock, flags);
3051
3052 return 0;
3053}
3054
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003055/*
Sarah Sharp2cf95c12011-05-11 16:14:58 -07003056 * Deletes endpoint resources for endpoints that were active before a Reset
3057 * Device command, or a Disable Slot command. The Reset Device command leaves
3058 * the control endpoint intact, whereas the Disable Slot command deletes it.
3059 *
3060 * Must be called with xhci->lock held.
3061 */
3062void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3063 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3064{
3065 int i;
3066 unsigned int num_dropped_eps = 0;
3067 unsigned int drop_flags = 0;
3068
3069 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3070 if (virt_dev->eps[i].ring) {
3071 drop_flags |= 1 << i;
3072 num_dropped_eps++;
3073 }
3074 }
3075 xhci->num_active_eps -= num_dropped_eps;
3076 if (num_dropped_eps)
3077 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
3078 "%u now active.\n",
3079 num_dropped_eps, drop_flags,
3080 xhci->num_active_eps);
3081}
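
/*
 * Worked example for the accounting above (hypothetical device, not from
 * the code): with only the EP1 OUT and EP1 IN rings still allocated
 * (device context indices 1 and 2) and drop_control_ep false, the loop
 * computes drop_flags == 0x6 and num_dropped_eps == 2, so two endpoint
 * contexts are returned to the host's pool.
 */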
3082
3083/*
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003084 * This submits a Reset Device Command, which will set the device state to 0,
3085 * set the device address to 0, and disable all the endpoints except the default
3086 * control endpoint. The USB core should come back and call
3087 * xhci_address_device(), and then re-set up the configuration. If this is
3088 * called because of a usb_reset_and_verify_device(), then the old alternate
3089 * settings will be re-installed through the normal bandwidth allocation
3090 * functions.
3091 *
3092 * Wait for the Reset Device command to finish. Remove all structures
3093 * associated with the endpoints that were disabled. Clear the input device
3094 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
Andiry Xuf0615c42010-10-14 07:22:48 -07003095 *
3096 * If the virt_dev to be reset does not exist or does not match the udev,
3097 * it means the device is lost, possibly due to an xHC restore error and
3098 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3099 * re-allocate the device.
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003100 */
Andiry Xuf0615c42010-10-14 07:22:48 -07003101int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003102{
3103 int ret, i;
3104 unsigned long flags;
3105 struct xhci_hcd *xhci;
3106 unsigned int slot_id;
3107 struct xhci_virt_device *virt_dev;
3108 struct xhci_command *reset_device_cmd;
3109 int timeleft;
3110 int last_freed_endpoint;
Maarten Lankhorst001fd382011-06-01 23:27:50 +02003111 struct xhci_slot_ctx *slot_ctx;
Sarah Sharp2e279802011-09-02 11:05:50 -07003112 int old_active_eps = 0;
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003113
Andiry Xuf0615c42010-10-14 07:22:48 -07003114 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003115 if (ret <= 0)
3116 return ret;
3117 xhci = hcd_to_xhci(hcd);
3118 slot_id = udev->slot_id;
3119 virt_dev = xhci->devs[slot_id];
Andiry Xuf0615c42010-10-14 07:22:48 -07003120 if (!virt_dev) {
3121 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3122 "not exist. Re-allocate the device\n", slot_id);
3123 ret = xhci_alloc_dev(hcd, udev);
3124 if (ret == 1)
3125 return 0;
3126 else
3127 return -EINVAL;
3128 }
3129
3130 if (virt_dev->udev != udev) {
3131		/* If the virt_dev and the udev do not match, this virt_dev
3132 * may belong to another udev.
3133 * Re-allocate the device.
3134 */
3135 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3136 "not match the udev. Re-allocate the device\n",
3137 slot_id);
3138 ret = xhci_alloc_dev(hcd, udev);
3139 if (ret == 1)
3140 return 0;
3141 else
3142 return -EINVAL;
3143 }
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003144
Maarten Lankhorst001fd382011-06-01 23:27:50 +02003145	/* If the device is not set up, there is no point in resetting it */
3146 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3147 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3148 SLOT_STATE_DISABLED)
3149 return 0;
3150
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003151 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3152 /* Allocate the command structure that holds the struct completion.
3153 * Assume we're in process context, since the normal device reset
3154 * process has to wait for the device anyway. Storage devices are
3155 * reset as part of error handling, so use GFP_NOIO instead of
3156 * GFP_KERNEL.
3157 */
3158 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3159 if (!reset_device_cmd) {
3160 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3161 return -ENOMEM;
3162 }
3163
3164 /* Attempt to submit the Reset Device command to the command ring */
3165 spin_lock_irqsave(&xhci->lock, flags);
3166 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
Paul Zimmerman7a3783e2010-11-17 16:26:50 -08003167
3168 /* Enqueue pointer can be left pointing to the link TRB,
3169	 * so we must handle that case.
3170 */
Matt Evansf5960b62011-06-01 10:22:55 +10003171 if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
Paul Zimmerman7a3783e2010-11-17 16:26:50 -08003172 reset_device_cmd->command_trb =
3173 xhci->cmd_ring->enq_seg->next->trbs;
3174
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003175 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3176 ret = xhci_queue_reset_device(xhci, slot_id);
3177 if (ret) {
3178 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3179 list_del(&reset_device_cmd->cmd_list);
3180 spin_unlock_irqrestore(&xhci->lock, flags);
3181 goto command_cleanup;
3182 }
3183 xhci_ring_cmd_db(xhci);
3184 spin_unlock_irqrestore(&xhci->lock, flags);
3185
3186 /* Wait for the Reset Device command to finish */
3187 timeleft = wait_for_completion_interruptible_timeout(
3188 reset_device_cmd->completion,
3189 USB_CTRL_SET_TIMEOUT);
3190 if (timeleft <= 0) {
3191 xhci_warn(xhci, "%s while waiting for reset device command\n",
3192 timeleft == 0 ? "Timeout" : "Signal");
3193 spin_lock_irqsave(&xhci->lock, flags);
3194 /* The timeout might have raced with the event ring handler, so
3195 * only delete from the list if the item isn't poisoned.
3196 */
3197 if (reset_device_cmd->cmd_list.next != LIST_POISON1)
3198 list_del(&reset_device_cmd->cmd_list);
3199 spin_unlock_irqrestore(&xhci->lock, flags);
3200 ret = -ETIME;
3201 goto command_cleanup;
3202 }
3203
3204 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3205 * unless we tried to reset a slot ID that wasn't enabled,
3206 * or the device wasn't in the addressed or configured state.
3207 */
3208 ret = reset_device_cmd->status;
3209 switch (ret) {
3210 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
3211 case COMP_CTX_STATE: /* 0.96 completion code for same thing */
3212 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
3213 slot_id,
3214 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3215 xhci_info(xhci, "Not freeing device rings.\n");
3216 /* Don't treat this as an error. May change my mind later. */
3217 ret = 0;
3218 goto command_cleanup;
3219 case COMP_SUCCESS:
3220 xhci_dbg(xhci, "Successful reset device command.\n");
3221 break;
3222 default:
3223 if (xhci_is_vendor_info_code(xhci, ret))
3224 break;
3225 xhci_warn(xhci, "Unknown completion code %u for "
3226 "reset device command.\n", ret);
3227 ret = -EINVAL;
3228 goto command_cleanup;
3229 }
3230
Sarah Sharp2cf95c12011-05-11 16:14:58 -07003231 /* Free up host controller endpoint resources */
3232 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3233 spin_lock_irqsave(&xhci->lock, flags);
3234 /* Don't delete the default control endpoint resources */
3235 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3236 spin_unlock_irqrestore(&xhci->lock, flags);
3237 }
3238
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003239 /* Everything but endpoint 0 is disabled, so free or cache the rings. */
3240 last_freed_endpoint = 1;
3241 for (i = 1; i < 31; ++i) {
Dmitry Torokhov2dea75d2011-04-12 23:06:28 -07003242 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3243
3244 if (ep->ep_state & EP_HAS_STREAMS) {
3245 xhci_free_stream_info(xhci, ep->stream_info);
3246 ep->stream_info = NULL;
3247 ep->ep_state &= ~EP_HAS_STREAMS;
3248 }
3249
3250 if (ep->ring) {
3251 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3252 last_freed_endpoint = i;
3253 }
Sarah Sharp2e279802011-09-02 11:05:50 -07003254 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3255 xhci_drop_ep_from_interval_table(xhci,
3256 &virt_dev->eps[i].bw_info,
3257 virt_dev->bw_table,
3258 udev,
3259 &virt_dev->eps[i],
3260 virt_dev->tt_info);
Sarah Sharp9af5d712011-09-02 11:05:48 -07003261 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003262 }
Sarah Sharp2e279802011-09-02 11:05:50 -07003263 /* If necessary, update the number of active TTs on this root port */
3264 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3265
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08003266 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3267 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3268 ret = 0;
3269
3270command_cleanup:
3271 xhci_free_command(xhci, reset_device_cmd);
3272 return ret;
3273}
3274
3275/*
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003276 * At this point, the struct usb_device is about to go away, the device has
3277 * disconnected, all traffic has been stopped, and the endpoints have been
3278 * disabled. Free any HC data structures associated with that device.
3279 */
3280void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3281{
3282 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
Sarah Sharp6f5165c2009-10-27 10:57:01 -07003283 struct xhci_virt_device *virt_dev;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003284 unsigned long flags;
Sarah Sharpc526d0d2009-09-16 16:42:39 -07003285 u32 state;
Andiry Xu64927732010-10-14 07:22:45 -07003286 int i, ret;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003287
Andiry Xu64927732010-10-14 07:22:45 -07003288 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
Sarah Sharp7bd89b42011-07-01 13:35:40 -07003289 /* If the host is halted due to driver unload, we still need to free the
3290 * device.
3291 */
3292 if (ret <= 0 && ret != -ENODEV)
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003293 return;
Andiry Xu64927732010-10-14 07:22:45 -07003294
Sarah Sharp6f5165c2009-10-27 10:57:01 -07003295 virt_dev = xhci->devs[udev->slot_id];
Sarah Sharp6f5165c2009-10-27 10:57:01 -07003296
3297 /* Stop any wayward timer functions (which may grab the lock) */
3298 for (i = 0; i < 31; ++i) {
3299 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3300 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3301 }
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003302
Andiry Xu65580b432011-09-23 14:19:52 -07003303 if (udev->usb2_hw_lpm_enabled) {
3304 xhci_set_usb2_hardware_lpm(hcd, udev, 0);
3305 udev->usb2_hw_lpm_enabled = 0;
3306 }
3307
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003308 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharpc526d0d2009-09-16 16:42:39 -07003309 /* Don't disable the slot if the host controller is dead. */
3310 state = xhci_readl(xhci, &xhci->op_regs->status);
Sarah Sharp7bd89b42011-07-01 13:35:40 -07003311 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3312 (xhci->xhc_state & XHCI_STATE_HALTED)) {
Sarah Sharpc526d0d2009-09-16 16:42:39 -07003313 xhci_free_virt_device(xhci, udev->slot_id);
3314 spin_unlock_irqrestore(&xhci->lock, flags);
3315 return;
3316 }
3317
Sarah Sharp23e3be12009-04-29 19:05:20 -07003318 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003319 spin_unlock_irqrestore(&xhci->lock, flags);
3320 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3321 return;
3322 }
Sarah Sharp23e3be12009-04-29 19:05:20 -07003323 xhci_ring_cmd_db(xhci);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003324 spin_unlock_irqrestore(&xhci->lock, flags);
3325 /*
3326 * Event command completion handler will free any data structures
Sarah Sharpf88ba782009-05-14 11:44:22 -07003327 * associated with the slot. XXX Can free sleep?
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003328 */
3329}
3330
3331/*
Sarah Sharp2cf95c12011-05-11 16:14:58 -07003332 * Checks if we have enough host controller resources for the default control
3333 * endpoint.
3334 *
3335 * Must be called with xhci->lock held.
3336 */
3337static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3338{
3339 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3340 xhci_dbg(xhci, "Not enough ep ctxs: "
3341 "%u active, need to add 1, limit is %u.\n",
3342 xhci->num_active_eps, xhci->limit_active_eps);
3343 return -ENOMEM;
3344 }
3345 xhci->num_active_eps += 1;
3346 xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
3347 xhci->num_active_eps);
3348 return 0;
3349}
3350
3351
3352/*
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003353 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3354 * timed out, or allocating memory failed. Returns 1 on success.
3355 */
3356int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3357{
3358 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3359 unsigned long flags;
3360 int timeleft;
3361 int ret;
3362
3363 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharp23e3be12009-04-29 19:05:20 -07003364 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003365 if (ret) {
3366 spin_unlock_irqrestore(&xhci->lock, flags);
3367 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3368 return 0;
3369 }
Sarah Sharp23e3be12009-04-29 19:05:20 -07003370 xhci_ring_cmd_db(xhci);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003371 spin_unlock_irqrestore(&xhci->lock, flags);
3372
3373 /* XXX: how much time for xHC slot assignment? */
3374 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3375 USB_CTRL_SET_TIMEOUT);
3376 if (timeleft <= 0) {
3377 xhci_warn(xhci, "%s while waiting for a slot\n",
3378 timeleft == 0 ? "Timeout" : "Signal");
3379 /* FIXME cancel the enable slot request */
3380 return 0;
3381 }
3382
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003383 if (!xhci->slot_id) {
3384 xhci_err(xhci, "Error while assigning device slot ID\n");
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003385 return 0;
3386 }
Sarah Sharp2cf95c12011-05-11 16:14:58 -07003387
3388 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3389 spin_lock_irqsave(&xhci->lock, flags);
3390 ret = xhci_reserve_host_control_ep_resources(xhci);
3391 if (ret) {
3392 spin_unlock_irqrestore(&xhci->lock, flags);
3393 xhci_warn(xhci, "Not enough host resources, "
3394 "active endpoint contexts = %u\n",
3395 xhci->num_active_eps);
3396 goto disable_slot;
3397 }
3398 spin_unlock_irqrestore(&xhci->lock, flags);
3399 }
3400 /* Use GFP_NOIO, since this function can be called from
Sarah Sharpa6d940d2010-12-28 13:08:42 -08003401 * xhci_discover_or_reset_device(), which may be called as part of
3402 * mass storage driver error handling.
3403 */
3404 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003405 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
Sarah Sharp2cf95c12011-05-11 16:14:58 -07003406 goto disable_slot;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003407 }
3408 udev->slot_id = xhci->slot_id;
3409 /* Is this a LS or FS device under a HS hub? */
3410	/* Hub or peripheral? */
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003411 return 1;
Sarah Sharp2cf95c12011-05-11 16:14:58 -07003412
3413disable_slot:
3414 /* Disable slot, if we can do it without mem alloc */
3415 spin_lock_irqsave(&xhci->lock, flags);
3416 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
3417 xhci_ring_cmd_db(xhci);
3418 spin_unlock_irqrestore(&xhci->lock, flags);
3419 return 0;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003420}
3421
3422/*
3423 * Issue an Address Device command (which will issue a SetAddress request to
3424 * the device).
3425 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
3426 * we should only issue and wait on one address command at the same time.
3427 *
3428 * We add one to the device address issued by the hardware because the USB core
3429 * uses address 1 for the root hubs (even though they're not really devices).
3430 */
3431int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3432{
3433 unsigned long flags;
3434 int timeleft;
3435 struct xhci_virt_device *virt_dev;
3436 int ret = 0;
3437 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
John Yound115b042009-07-27 12:05:15 -07003438 struct xhci_slot_ctx *slot_ctx;
3439 struct xhci_input_control_ctx *ctrl_ctx;
Sarah Sharp8e595a52009-07-27 12:03:31 -07003440 u64 temp_64;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003441
3442 if (!udev->slot_id) {
3443 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
3444 return -EINVAL;
3445 }
3446
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003447 virt_dev = xhci->devs[udev->slot_id];
3448
Matt Evans7ed603e2011-03-29 13:40:56 +11003449 if (WARN_ON(!virt_dev)) {
3450 /*
3451 * In plug/unplug torture test with an NEC controller,
3452 * a zero-dereference was observed once due to virt_dev = 0.
3453 * Print useful debug rather than crash if it is observed again!
3454 */
3455 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3456 udev->slot_id);
3457 return -EINVAL;
3458 }
3459
Andiry Xuf0615c42010-10-14 07:22:48 -07003460 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3461 /*
3462 * If this is the first Set Address since device plug-in or
3463	 * virt_device reallocation after a resume with an xHCI power loss,
3464 * then set up the slot context.
3465 */
3466 if (!slot_ctx->dev_info)
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003467 xhci_setup_addressable_virt_dev(xhci, udev);
Andiry Xuf0615c42010-10-14 07:22:48 -07003468 /* Otherwise, update the control endpoint ring enqueue pointer. */
Sarah Sharp2d1ee592010-07-09 17:08:54 +02003469 else
3470 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
Sarah Sharp66e49d82009-07-27 12:03:46 -07003471 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
John Yound115b042009-07-27 12:05:15 -07003472 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003473
Sarah Sharpf88ba782009-05-14 11:44:22 -07003474 spin_lock_irqsave(&xhci->lock, flags);
John Yound115b042009-07-27 12:05:15 -07003475 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
3476 udev->slot_id);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003477 if (ret) {
3478 spin_unlock_irqrestore(&xhci->lock, flags);
3479 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3480 return ret;
3481 }
Sarah Sharp23e3be12009-04-29 19:05:20 -07003482 xhci_ring_cmd_db(xhci);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003483 spin_unlock_irqrestore(&xhci->lock, flags);
3484
3485 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
3486 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3487 USB_CTRL_SET_TIMEOUT);
3488 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
3489 * the SetAddress() "recovery interval" required by USB and aborting the
3490 * command on a timeout.
3491 */
3492 if (timeleft <= 0) {
Andiry Xucd681762011-09-23 14:19:55 -07003493 xhci_warn(xhci, "%s while waiting for address device command\n",
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003494 timeleft == 0 ? "Timeout" : "Signal");
3495 /* FIXME cancel the address device command */
3496 return -ETIME;
3497 }
3498
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003499 switch (virt_dev->cmd_status) {
3500 case COMP_CTX_STATE:
3501 case COMP_EBADSLT:
3502 xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
3503 udev->slot_id);
3504 ret = -EINVAL;
3505 break;
3506 case COMP_TX_ERR:
3507 dev_warn(&udev->dev, "Device not responding to set address.\n");
3508 ret = -EPROTO;
3509 break;
Alex Hef6ba6fe2011-06-08 18:34:06 +08003510 case COMP_DEV_ERR:
3511 dev_warn(&udev->dev, "ERROR: Incompatible device for address "
3512 "device command.\n");
3513 ret = -ENODEV;
3514 break;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003515 case COMP_SUCCESS:
3516 xhci_dbg(xhci, "Successful Address Device command\n");
3517 break;
3518 default:
3519 xhci_err(xhci, "ERROR: unexpected command completion "
3520 "code 0x%x.\n", virt_dev->cmd_status);
Sarah Sharp66e49d82009-07-27 12:03:46 -07003521 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
John Yound115b042009-07-27 12:05:15 -07003522 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003523 ret = -EINVAL;
3524 break;
3525 }
3526 if (ret) {
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003527 return ret;
3528 }
Sarah Sharp8e595a52009-07-27 12:03:31 -07003529 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3530 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
3531 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
Matt Evans28ccd292011-03-29 13:40:46 +11003532 udev->slot_id,
3533 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3534 (unsigned long long)
3535 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07003536 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
John Yound115b042009-07-27 12:05:15 -07003537 (unsigned long long)virt_dev->out_ctx->dma);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003538 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
John Yound115b042009-07-27 12:05:15 -07003539 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003540 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
John Yound115b042009-07-27 12:05:15 -07003541 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003542 /*
3543 * USB core uses address 1 for the roothubs, so we add one to the
3544 * address given back to us by the HC.
3545 */
John Yound115b042009-07-27 12:05:15 -07003546 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
Andiry Xuc8d4af82010-10-14 07:22:51 -07003547	/* Use the kernel-assigned address for devices; store the xHC-assigned
3548 * address locally. */
Matt Evans28ccd292011-03-29 13:40:46 +11003549 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
3550 + 1;
Sarah Sharpf94e01862009-04-27 19:58:38 -07003551 /* Zero the input context control for later use */
John Yound115b042009-07-27 12:05:15 -07003552 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3553 ctrl_ctx->add_flags = 0;
3554 ctrl_ctx->drop_flags = 0;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003555
Andiry Xuc8d4af82010-10-14 07:22:51 -07003556 xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07003557
3558 return 0;
3559}
3560
Andiry Xu95743232011-09-23 14:19:51 -07003561#ifdef CONFIG_USB_SUSPEND
3562
3563/* BESL to HIRD Encoding array for USB2 LPM */
3564static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
3565 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
3566
3567/* Calculate HIRD/BESL for USB2 PORTPMSC */
3568static int xhci_calculate_hird_besl(int u2del, bool use_besl)
3569{
3570 int hird;
3571
3572 if (use_besl) {
3573 for (hird = 0; hird < 16; hird++) {
3574 if (xhci_besl_encoding[hird] >= u2del)
3575 break;
3576 }
3577 } else {
3578 if (u2del <= 50)
3579 hird = 0;
3580 else
3581 hird = (u2del - 51) / 75 + 1;
3582
3583 if (hird > 15)
3584 hird = 15;
3585 }
3586
3587 return hird;
3588}
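
/*
 * Illustrative sketch, not part of the driver: worked values for the
 * encoding above. The caller below is made up for the example.
 */
#if 0
static void example_hird_besl(void)
{
	int a = xhci_calculate_hird_besl(300, true);	/* 3: first BESL entry >= 300 */
	int b = xhci_calculate_hird_besl(300, false);	/* 4: (300 - 51) / 75 + 1 */
	int c = xhci_calculate_hird_besl(2000, false);	/* 15: capped at the maximum */

	(void)a; (void)b; (void)c;
}
#endif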
3589
3590static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
3591 struct usb_device *udev)
3592{
3593 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3594 struct dev_info *dev_info;
3595 __le32 __iomem **port_array;
3596 __le32 __iomem *addr, *pm_addr;
3597 u32 temp, dev_id;
3598 unsigned int port_num;
3599 unsigned long flags;
3600 int u2del, hird;
3601 int ret;
3602
3603 if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
3604 !udev->lpm_capable)
3605 return -EINVAL;
3606
3607	/* We only support LPM for non-hub devices connected directly to the root hub so far */
3608 if (!udev->parent || udev->parent->parent ||
3609 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3610 return -EINVAL;
3611
3612 spin_lock_irqsave(&xhci->lock, flags);
3613
3614 /* Look for devices in lpm_failed_devs list */
3615 dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
3616 le16_to_cpu(udev->descriptor.idProduct);
3617 list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
3618 if (dev_info->dev_id == dev_id) {
3619 ret = -EINVAL;
3620 goto finish;
3621 }
3622 }
3623
3624 port_array = xhci->usb2_ports;
3625 port_num = udev->portnum - 1;
3626
3627 if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
3628 xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
3629 ret = -EINVAL;
3630 goto finish;
3631 }
3632
3633 /*
3634 * Test USB 2.0 software LPM.
3635 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
3636 * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
3637 * in the June 2011 errata release.
3638 */
3639 xhci_dbg(xhci, "test port %d software LPM\n", port_num);
3640 /*
3641 * Set L1 Device Slot and HIRD/BESL.
3642 * Check device's USB 2.0 extension descriptor to determine whether
3643	 * HIRD or BESL should be used. See the USB 2.0 LPM errata.
3644 */
3645 pm_addr = port_array[port_num] + 1;
3646 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
3647 if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
3648 hird = xhci_calculate_hird_besl(u2del, 1);
3649 else
3650 hird = xhci_calculate_hird_besl(u2del, 0);
3651
3652 temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
3653 xhci_writel(xhci, temp, pm_addr);
3654
3655 /* Set port link state to U2(L1) */
3656 addr = port_array[port_num];
3657 xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
3658
3659 /* wait for ACK */
3660 spin_unlock_irqrestore(&xhci->lock, flags);
3661 msleep(10);
3662 spin_lock_irqsave(&xhci->lock, flags);
3663
3664 /* Check L1 Status */
3665 ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
3666 if (ret != -ETIMEDOUT) {
3667		/* entered L1 successfully */
3668 temp = xhci_readl(xhci, addr);
3669 xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
3670 port_num, temp);
3671 ret = 0;
3672 } else {
3673 temp = xhci_readl(xhci, pm_addr);
3674 xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
3675 port_num, temp & PORT_L1S_MASK);
3676 ret = -EINVAL;
3677 }
3678
3679 /* Resume the port */
3680 xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
3681
3682 spin_unlock_irqrestore(&xhci->lock, flags);
3683 msleep(10);
3684 spin_lock_irqsave(&xhci->lock, flags);
3685
3686 /* Clear PLC */
3687 xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
3688
3689 /* Check PORTSC to make sure the device is in the right state */
3690 if (!ret) {
3691 temp = xhci_readl(xhci, addr);
3692 xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
3693 if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
3694 (temp & PORT_PLS_MASK) != XDEV_U0) {
3695 xhci_dbg(xhci, "port L1 resume fail\n");
3696 ret = -EINVAL;
3697 }
3698 }
3699
3700 if (ret) {
3701		/* Insert dev into the lpm_failed_devs list */
3702 xhci_warn(xhci, "device LPM test failed, may disconnect and "
3703 "re-enumerate\n");
3704 dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
3705 if (!dev_info) {
3706 ret = -ENOMEM;
3707 goto finish;
3708 }
3709 dev_info->dev_id = dev_id;
3710 INIT_LIST_HEAD(&dev_info->list);
3711 list_add(&dev_info->list, &xhci->lpm_failed_devs);
3712 } else {
3713 xhci_ring_device(xhci, udev->slot_id);
3714 }
3715
3716finish:
3717 spin_unlock_irqrestore(&xhci->lock, flags);
3718 return ret;
3719}
3720
Andiry Xu65580b432011-09-23 14:19:52 -07003721int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
3722 struct usb_device *udev, int enable)
3723{
3724 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3725 __le32 __iomem **port_array;
3726 __le32 __iomem *pm_addr;
3727 u32 temp;
3728 unsigned int port_num;
3729 unsigned long flags;
3730 int u2del, hird;
3731
3732 if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
3733 !udev->lpm_capable)
3734 return -EPERM;
3735
3736 if (!udev->parent || udev->parent->parent ||
3737 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3738 return -EPERM;
3739
3740 if (udev->usb2_hw_lpm_capable != 1)
3741 return -EPERM;
3742
3743 spin_lock_irqsave(&xhci->lock, flags);
3744
3745 port_array = xhci->usb2_ports;
3746 port_num = udev->portnum - 1;
3747 pm_addr = port_array[port_num] + 1;
3748 temp = xhci_readl(xhci, pm_addr);
3749
3750 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
3751 enable ? "enable" : "disable", port_num);
3752
3753 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
3754 if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
3755 hird = xhci_calculate_hird_besl(u2del, 1);
3756 else
3757 hird = xhci_calculate_hird_besl(u2del, 0);
3758
3759 if (enable) {
3760 temp &= ~PORT_HIRD_MASK;
3761 temp |= PORT_HIRD(hird) | PORT_RWE;
3762 xhci_writel(xhci, temp, pm_addr);
3763 temp = xhci_readl(xhci, pm_addr);
3764 temp |= PORT_HLE;
3765 xhci_writel(xhci, temp, pm_addr);
3766 } else {
3767 temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
3768 xhci_writel(xhci, temp, pm_addr);
3769 }
3770
3771 spin_unlock_irqrestore(&xhci->lock, flags);
3772 return 0;
3773}
3774
Andiry Xu95743232011-09-23 14:19:51 -07003775int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
3776{
3777 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3778 int ret;
3779
3780 ret = xhci_usb2_software_lpm_test(hcd, udev);
Andiry Xu65580b432011-09-23 14:19:52 -07003781 if (!ret) {
Andiry Xu95743232011-09-23 14:19:51 -07003782 xhci_dbg(xhci, "software LPM test succeed\n");
Andiry Xu65580b432011-09-23 14:19:52 -07003783 if (xhci->hw_lpm_support == 1) {
3784 udev->usb2_hw_lpm_capable = 1;
3785 ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
3786 if (!ret)
3787 udev->usb2_hw_lpm_enabled = 1;
3788 }
3789 }
Andiry Xu95743232011-09-23 14:19:51 -07003790
3791 return 0;
3792}
3793
3794#else
3795
Andiry Xu65580b432011-09-23 14:19:52 -07003796int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
3797 struct usb_device *udev, int enable)
3798{
3799 return 0;
3800}
3801
Andiry Xu95743232011-09-23 14:19:51 -07003802int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
3803{
3804 return 0;
3805}
3806
3807#endif /* CONFIG_USB_SUSPEND */
3808
Sarah Sharpac1c1b72009-09-04 10:53:20 -07003809/* Once a hub descriptor is fetched for a device, we need to update the xHC's
3810 * internal data structures for the device.
3811 */
3812int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
3813 struct usb_tt *tt, gfp_t mem_flags)
3814{
3815 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3816 struct xhci_virt_device *vdev;
3817 struct xhci_command *config_cmd;
3818 struct xhci_input_control_ctx *ctrl_ctx;
3819 struct xhci_slot_ctx *slot_ctx;
3820 unsigned long flags;
3821 unsigned think_time;
3822 int ret;
3823
3824 /* Ignore root hubs */
3825 if (!hdev->parent)
3826 return 0;
3827
3828 vdev = xhci->devs[hdev->slot_id];
3829 if (!vdev) {
3830 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
3831 return -EINVAL;
3832 }
Sarah Sharpa1d78c12009-12-09 15:59:03 -08003833 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
Sarah Sharpac1c1b72009-09-04 10:53:20 -07003834 if (!config_cmd) {
3835 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3836 return -ENOMEM;
3837 }
3838
3839 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharp839c8172011-09-02 11:05:47 -07003840 if (hdev->speed == USB_SPEED_HIGH &&
3841 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
3842 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
3843 xhci_free_command(xhci, config_cmd);
3844 spin_unlock_irqrestore(&xhci->lock, flags);
3845 return -ENOMEM;
3846 }
3847
Sarah Sharpac1c1b72009-09-04 10:53:20 -07003848 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
3849 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11003850 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
Sarah Sharpac1c1b72009-09-04 10:53:20 -07003851 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11003852 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
Sarah Sharpac1c1b72009-09-04 10:53:20 -07003853 if (tt->multi)
Matt Evans28ccd292011-03-29 13:40:46 +11003854 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
Sarah Sharpac1c1b72009-09-04 10:53:20 -07003855 if (xhci->hci_version > 0x95) {
3856 xhci_dbg(xhci, "xHCI version %x needs hub "
3857 "TT think time and number of ports\n",
3858 (unsigned int) xhci->hci_version);
Matt Evans28ccd292011-03-29 13:40:46 +11003859 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
Sarah Sharpac1c1b72009-09-04 10:53:20 -07003860 /* Set TT think time - convert from ns to FS bit times.
3861 * 0 = 8 FS bit times, 1 = 16 FS bit times,
3862 * 2 = 24 FS bit times, 3 = 32 FS bit times.
Andiry Xu700b4172011-05-05 18:14:05 +08003863 *
3864 * xHCI 1.0: this field shall be 0 if the device is not a
3865		 * High-speed hub.
Sarah Sharpac1c1b72009-09-04 10:53:20 -07003866 */
3867 think_time = tt->think_time;
3868 if (think_time != 0)
3869 think_time = (think_time / 666) - 1;
Andiry Xu700b4172011-05-05 18:14:05 +08003870 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
3871 slot_ctx->tt_info |=
3872 cpu_to_le32(TT_THINK_TIME(think_time));
Sarah Sharpac1c1b72009-09-04 10:53:20 -07003873 } else {
3874 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
3875 "TT think time or number of ports\n",
3876 (unsigned int) xhci->hci_version);
3877 }
3878 slot_ctx->dev_state = 0;
3879 spin_unlock_irqrestore(&xhci->lock, flags);
3880
3881 xhci_dbg(xhci, "Set up %s for hub device.\n",
3882 (xhci->hci_version > 0x95) ?
3883 "configure endpoint" : "evaluate context");
3884 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
3885 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
3886
3887 /* Issue and wait for the configure endpoint or
3888 * evaluate context command.
3889 */
3890 if (xhci->hci_version > 0x95)
3891 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
3892 false, false);
3893 else
3894 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
3895 true, false);
3896
3897 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
3898 xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
3899
3900 xhci_free_command(xhci, config_cmd);
3901 return ret;
3902}
3903
Sarah Sharp66d4ead2009-04-27 19:52:28 -07003904int xhci_get_frame(struct usb_hcd *hcd)
3905{
3906 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3907 /* EHCI mods by the periodic size. Why? */
3908 return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
3909}
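
/*
 * Note on the shift above (editorial assumption from the xHCI spec, not
 * original driver text): MFINDEX counts 125 us microframes, so shifting
 * right by three yields the 1 ms frame number the USB core expects,
 * whereas EHCI instead wraps its index at the periodic schedule size.
 */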
3910
3911MODULE_DESCRIPTION(DRIVER_DESC);
3912MODULE_AUTHOR(DRIVER_AUTHOR);
3913MODULE_LICENSE("GPL");
3914
3915static int __init xhci_hcd_init(void)
3916{
3917#ifdef CONFIG_PCI
3918 int retval = 0;
3919
3920 retval = xhci_register_pci();
3921
3922 if (retval < 0) {
3923 printk(KERN_DEBUG "Problem registering PCI driver.");
3924 return retval;
3925 }
3926#endif
Sarah Sharp98441972009-05-14 11:44:18 -07003927 /*
3928 * Check the compiler generated sizes of structures that must be laid
3929 * out in specific ways for hardware access.
3930 */
3931 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
3932 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
3933 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
3934 /* xhci_device_control has eight fields, and also
3935 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
3936 */
Sarah Sharp98441972009-05-14 11:44:18 -07003937 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
3938 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
3939 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
3940 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
3941 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
3942 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
3943 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
3944 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
Sarah Sharp66d4ead2009-04-27 19:52:28 -07003945 return 0;
3946}
3947module_init(xhci_hcd_init);
3948
3949static void __exit xhci_hcd_cleanup(void)
3950{
3951#ifdef CONFIG_PCI
3952 xhci_unregister_pci();
3953#endif
3954}
3955module_exit(xhci_hcd_cleanup);