/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
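 *
 * Typical use (this mirrors xhci_halt() below): wait up to
 * XHCI_MAX_HALT_USEC for the HCHalted status bit to latch:
 *
 *	handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);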
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
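	/* Only clear the run/stop bit below if the host hasn't already
	 * halted on its own.
	 */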
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}

/*
 * Free IRQs
 * Free all the IRQs that have been requested.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			if (xhci->msix_entries[i].vector)
				free_irq(xhci->msix_entries[i].vector,
						xhci_to_hcd(xhci));
	} else if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_err(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   based on the number of interrupters in xHCI HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one extra
	 *   vector to ensure an interrupt is always available.
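	 *
	 * Illustrative example: with 4 online CPUs and HCSPARAMS1 reporting
	 * 8 interrupters, this works out to min(4 + 1, 8) = 5 vectors.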
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_err(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*-------------------------------------------------------------------------*/


#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Set up MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	u32 ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");
	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (ret) {
		/* fall back to legacy interrupt */
		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
				hcd->irq_descr, hcd);
		if (ret) {
			xhci_err(xhci, "request interrupt %d failed\n",
					pdev->irq);
			return ret;
		}
		hcd->irq = pdev->irq;
	}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
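	/* Per the xHCI spec, the IMOD interval field counts in 250 ns units,
	 * so writing 160 gives roughly a 40 us moderation interval.
	 */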
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
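	/* Zero every TRB segment in the circular command ring. */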
	do {
		memset(seg->trbs, 0, SEGMENT_SIZE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Stop HC (not bus-specific).
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	int i;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}

	return rc;
}

/*
 * Start xHC (not bus-specific).
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	spin_lock_irq(&xhci->lock);

	if (!hibernated) {
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10*100)) {
			xhci_dbg(xhci, "WARN: xHC CMD_CRS timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (retval)
			goto failed_restart;

		xhci_dbg(xhci, "Start the secondary HCD\n");
		retval = xhci_run(secondary_hcd);
		if (!retval) {
			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			set_bit(HCD_FLAG_HW_ACCESSIBLE,
					&xhci->shared_hcd->flags);
		}
failed_restart:
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		return retval;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_unlock_irq(&xhci->lock);
	return 0;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
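 *
 * Worked examples: ep 1 OUT (0x01) => index 1, ep 1 IN (0x81) => index 2,
 * and the default control endpoint 0 => index 0.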
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
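 * For example, ep 1 IN (endpoint index 2) yields the flag 1 << 3 = 0x8.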
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	if (check_virt_dev) {
		xhci = hcd_to_xhci(hcd);
		if (!udev->slot_id || !xhci->devs
			|| !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
						"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and "
					  "virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

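	/* Isochronous URBs need one TD per packet; all other transfer types
	 * need a single TD.
	 */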
	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				  size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
		if (!urb_priv->td[i]) {
			urb_priv->length = i;
			xhci_urb_free_priv(xhci, urb_priv);
			return -ENOMEM;
		}
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0)
				return ret;
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return -ESHUTDOWN;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);

	urb_priv = urb->hcpriv;

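	/* Queue for cancellation only the TDs that haven't been given back
	 * yet (urb_priv->td_cnt counts the TDs already completed).
	 */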
	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
1285
Sarah Sharpf94e01862009-04-27 19:58:38 -07001286/* Drop an endpoint from a new bandwidth configuration for this device.
1287 * Only one call to this function is allowed per endpoint before
1288 * check_bandwidth() or reset_bandwidth() must be called.
1289 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1290 * add the endpoint to the schedule with possibly new parameters denoted by a
1291 * different endpoint descriptor in usb_host_endpoint.
1292 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1293 * not allowed.
Sarah Sharpf88ba782009-05-14 11:44:22 -07001294 *
1295 * The USB core will not allow URBs to be queued to an endpoint that is being
1296 * disabled, so there's no need for mutual exclusion to protect
1297 * the xhci->devs[slot_id] structure.
Sarah Sharpf94e01862009-04-27 19:58:38 -07001298 */
1299int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1300 struct usb_host_endpoint *ep)
1301{
Sarah Sharpf94e01862009-04-27 19:58:38 -07001302 struct xhci_hcd *xhci;
John Yound115b042009-07-27 12:05:15 -07001303 struct xhci_container_ctx *in_ctx, *out_ctx;
1304 struct xhci_input_control_ctx *ctrl_ctx;
1305 struct xhci_slot_ctx *slot_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001306 unsigned int last_ctx;
1307 unsigned int ep_index;
1308 struct xhci_ep_ctx *ep_ctx;
1309 u32 drop_flag;
1310 u32 new_add_flags, new_drop_flags, new_slot_info;
1311 int ret;
1312
Andiry Xu64927732010-10-14 07:22:45 -07001313 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001314 if (ret <= 0)
1315 return ret;
1316 xhci = hcd_to_xhci(hcd);
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07001317 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001318
1319 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1320 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1321 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1322 __func__, drop_flag);
1323 return 0;
1324 }
1325
Sarah Sharpf94e01862009-04-27 19:58:38 -07001326 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
John Yound115b042009-07-27 12:05:15 -07001327 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1328 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001329 ep_index = xhci_get_endpoint_index(&ep->desc);
John Yound115b042009-07-27 12:05:15 -07001330 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001331 /* If the HC already knows the endpoint is disabled,
1332 * or the HCD has noted it is disabled, ignore this request
1333 */
Matt Evans28ccd292011-03-29 13:40:46 +11001334 if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
1335 EP_STATE_DISABLED ||
1336 le32_to_cpu(ctrl_ctx->drop_flags) &
1337 xhci_get_endpoint_flag(&ep->desc)) {
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07001338 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1339 __func__, ep);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001340 return 0;
1341 }
1342
Matt Evans28ccd292011-03-29 13:40:46 +11001343 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1344 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001345
Matt Evans28ccd292011-03-29 13:40:46 +11001346 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1347 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001348
Matt Evans28ccd292011-03-29 13:40:46 +11001349 last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
John Yound115b042009-07-27 12:05:15 -07001350 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001351 /* Update the last valid endpoint context, if we deleted the last one */
Matt Evans28ccd292011-03-29 13:40:46 +11001352 if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
1353 LAST_CTX(last_ctx)) {
1354 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1355 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001356 }
Matt Evans28ccd292011-03-29 13:40:46 +11001357 new_slot_info = le32_to_cpu(slot_ctx->dev_info);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001358
1359 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1360
Sarah Sharpf94e01862009-04-27 19:58:38 -07001361 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1362 (unsigned int) ep->desc.bEndpointAddress,
1363 udev->slot_id,
1364 (unsigned int) new_drop_flags,
1365 (unsigned int) new_add_flags,
1366 (unsigned int) new_slot_info);
1367 return 0;
1368}
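/*
 * Illustrative sketch, not part of the driver: how an endpoint's
 * add/drop flag bit relates to its index.  In the input control
 * context, bit 0 is the slot context, bit 1 is the default control
 * endpoint, and an endpoint with driver index ep_index occupies bit
 * (ep_index + 1), which is what xhci_get_endpoint_flag() derives from
 * the descriptor.  The helper name below is hypothetical.
 */
static inline u32 example_ep_flag(unsigned int ep_index)
{
	/* ep_index is the zero-based index, i.e. device context index - 1 */
	return 1U << (ep_index + 1);
}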
1369
1370/* Add an endpoint to a new possible bandwidth configuration for this device.
1371 * Only one call to this function is allowed per endpoint before
1372 * check_bandwidth() or reset_bandwidth() must be called.
1373 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1374 * add the endpoint to the schedule with possibly new parameters denoted by a
1375 * different endpoint descriptor in usb_host_endpoint.
1376 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1377 * not allowed.
Sarah Sharpf88ba782009-05-14 11:44:22 -07001378 *
1379 * The USB core will not allow URBs to be queued to an endpoint until the
1380 * configuration or alt setting is installed in the device, so there's no need
1381 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
Sarah Sharpf94e01862009-04-27 19:58:38 -07001382 */
1383int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1384 struct usb_host_endpoint *ep)
1385{
Sarah Sharpf94e01862009-04-27 19:58:38 -07001386 struct xhci_hcd *xhci;
John Yound115b042009-07-27 12:05:15 -07001387 struct xhci_container_ctx *in_ctx, *out_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001388 unsigned int ep_index;
1389 struct xhci_ep_ctx *ep_ctx;
John Yound115b042009-07-27 12:05:15 -07001390 struct xhci_slot_ctx *slot_ctx;
1391 struct xhci_input_control_ctx *ctrl_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001392 u32 added_ctxs;
1393 unsigned int last_ctx;
1394 u32 new_add_flags, new_drop_flags, new_slot_info;
1395 int ret = 0;
1396
Andiry Xu64927732010-10-14 07:22:45 -07001397 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
Sarah Sharpa1587d92009-07-27 12:03:15 -07001398 if (ret <= 0) {
1399 /* So we won't queue a reset ep command for a root hub */
1400 ep->hcpriv = NULL;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001401 return ret;
Sarah Sharpa1587d92009-07-27 12:03:15 -07001402 }
Sarah Sharpf94e01862009-04-27 19:58:38 -07001403 xhci = hcd_to_xhci(hcd);
1404
1405 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1406 last_ctx = xhci_last_valid_endpoint(added_ctxs);
1407 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1408 /* FIXME when we have to issue an evaluate endpoint command to
1409 * deal with ep0 max packet size changing once we get the
1410 * descriptors
1411 */
1412 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1413 __func__, added_ctxs);
1414 return 0;
1415 }
1416
Sarah Sharpf94e01862009-04-27 19:58:38 -07001417 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
John Yound115b042009-07-27 12:05:15 -07001418 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1419 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001420 ep_index = xhci_get_endpoint_index(&ep->desc);
John Yound115b042009-07-27 12:05:15 -07001421 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001422 /* If the HCD has already noted the endpoint is enabled,
1423 * ignore this request.
1424 */
Matt Evans28ccd292011-03-29 13:40:46 +11001425 if (le32_to_cpu(ctrl_ctx->add_flags) &
1426 xhci_get_endpoint_flag(&ep->desc)) {
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07001427 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1428 __func__, ep);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001429 return 0;
1430 }
1431
Sarah Sharpf88ba782009-05-14 11:44:22 -07001432 /*
1433 * Configuration and alternate setting changes must be done in
 1434	 * process context, not interrupt context (or so the documentation
 1435	 * for usb_set_interface() and usb_set_configuration() claims).
1436 */
1437 if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
Oliver Neukum319c3ea2009-12-16 19:43:59 +01001438 udev, ep, GFP_NOIO) < 0) {
Sarah Sharpf94e01862009-04-27 19:58:38 -07001439 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1440 __func__, ep->desc.bEndpointAddress);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001441 return -ENOMEM;
1442 }
1443
Matt Evans28ccd292011-03-29 13:40:46 +11001444 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1445 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001446
1447 /* If xhci_endpoint_disable() was called for this endpoint, but the
1448 * xHC hasn't been notified yet through the check_bandwidth() call,
1449 * this re-adds a new state for the endpoint from the new endpoint
1450 * descriptors. We must drop and re-add this endpoint, so we leave the
1451 * drop flags alone.
1452 */
Matt Evans28ccd292011-03-29 13:40:46 +11001453 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001454
John Yound115b042009-07-27 12:05:15 -07001455 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001456	/* Update the last valid endpoint context, if we just added one past it */
Matt Evans28ccd292011-03-29 13:40:46 +11001457 if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
1458 LAST_CTX(last_ctx)) {
1459 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1460 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001461 }
Matt Evans28ccd292011-03-29 13:40:46 +11001462 new_slot_info = le32_to_cpu(slot_ctx->dev_info);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001463
Sarah Sharpa1587d92009-07-27 12:03:15 -07001464 /* Store the usb_device pointer for later use */
1465 ep->hcpriv = udev;
1466
Sarah Sharpf94e01862009-04-27 19:58:38 -07001467 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1468 (unsigned int) ep->desc.bEndpointAddress,
1469 udev->slot_id,
1470 (unsigned int) new_drop_flags,
1471 (unsigned int) new_add_flags,
1472 (unsigned int) new_slot_info);
1473 return 0;
1474}
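/*
 * Illustrative sketch (hypothetical helper): the "last valid context"
 * field updated in the two functions above lives in the high bits of
 * slot_ctx->dev_info, so storing a new value is a read-modify-write
 * using the LAST_CTX()/LAST_CTX_MASK macros seen throughout this file:
 */
static inline void example_set_last_ctx(struct xhci_slot_ctx *slot_ctx,
		unsigned int last_ctx)
{
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
}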
1475
John Yound115b042009-07-27 12:05:15 -07001476static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
Sarah Sharpf94e01862009-04-27 19:58:38 -07001477{
John Yound115b042009-07-27 12:05:15 -07001478 struct xhci_input_control_ctx *ctrl_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001479 struct xhci_ep_ctx *ep_ctx;
John Yound115b042009-07-27 12:05:15 -07001480 struct xhci_slot_ctx *slot_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001481 int i;
1482
1483 /* When a device's add flag and drop flag are zero, any subsequent
1484 * configure endpoint command will leave that endpoint's state
1485 * untouched. Make sure we don't leave any old state in the input
1486 * endpoint contexts.
1487 */
John Yound115b042009-07-27 12:05:15 -07001488 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1489 ctrl_ctx->drop_flags = 0;
1490 ctrl_ctx->add_flags = 0;
1491 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11001492 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001493 /* Endpoint 0 is always valid */
Matt Evans28ccd292011-03-29 13:40:46 +11001494 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001495 for (i = 1; i < 31; ++i) {
John Yound115b042009-07-27 12:05:15 -07001496 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001497 ep_ctx->ep_info = 0;
1498 ep_ctx->ep_info2 = 0;
Sarah Sharp8e595a52009-07-27 12:03:31 -07001499 ep_ctx->deq = 0;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001500 ep_ctx->tx_info = 0;
1501 }
1502}
1503
Sarah Sharpf2217e82009-08-07 14:04:43 -07001504static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
Sarah Sharp913a8a32009-09-04 10:53:13 -07001505 struct usb_device *udev, int *cmd_status)
Sarah Sharpf2217e82009-08-07 14:04:43 -07001506{
1507 int ret;
1508
Sarah Sharp913a8a32009-09-04 10:53:13 -07001509 switch (*cmd_status) {
Sarah Sharpf2217e82009-08-07 14:04:43 -07001510 case COMP_ENOMEM:
1511 dev_warn(&udev->dev, "Not enough host controller resources "
1512 "for new device state.\n");
1513 ret = -ENOMEM;
1514 /* FIXME: can we allocate more resources for the HC? */
1515 break;
1516 case COMP_BW_ERR:
1517 dev_warn(&udev->dev, "Not enough bandwidth "
1518 "for new device state.\n");
1519 ret = -ENOSPC;
1520 /* FIXME: can we go back to the old state? */
1521 break;
1522 case COMP_TRB_ERR:
1523 /* the HCD set up something wrong */
1524 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1525 "add flag = 1, "
1526 "and endpoint is not disabled.\n");
1527 ret = -EINVAL;
1528 break;
1529 case COMP_SUCCESS:
1530 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1531 ret = 0;
1532 break;
1533 default:
1534 xhci_err(xhci, "ERROR: unexpected command completion "
Sarah Sharp913a8a32009-09-04 10:53:13 -07001535 "code 0x%x.\n", *cmd_status);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001536 ret = -EINVAL;
1537 break;
1538 }
1539 return ret;
1540}
1541
1542static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
Sarah Sharp913a8a32009-09-04 10:53:13 -07001543 struct usb_device *udev, int *cmd_status)
Sarah Sharpf2217e82009-08-07 14:04:43 -07001544{
1545 int ret;
Sarah Sharp913a8a32009-09-04 10:53:13 -07001546 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
Sarah Sharpf2217e82009-08-07 14:04:43 -07001547
Sarah Sharp913a8a32009-09-04 10:53:13 -07001548 switch (*cmd_status) {
Sarah Sharpf2217e82009-08-07 14:04:43 -07001549 case COMP_EINVAL:
1550 dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1551 "context command.\n");
1552 ret = -EINVAL;
1553 break;
1554 case COMP_EBADSLT:
 1555		dev_warn(&udev->dev, "WARN: slot not enabled for "
 1556				"evaluate context command.\n");
		ret = -EINVAL;
		break;
 1557	case COMP_CTX_STATE:
1558 dev_warn(&udev->dev, "WARN: invalid context state for "
1559 "evaluate context command.\n");
1560 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1561 ret = -EINVAL;
1562 break;
1563 case COMP_SUCCESS:
1564 dev_dbg(&udev->dev, "Successful evaluate context command\n");
1565 ret = 0;
1566 break;
1567 default:
1568 xhci_err(xhci, "ERROR: unexpected command completion "
Sarah Sharp913a8a32009-09-04 10:53:13 -07001569 "code 0x%x.\n", *cmd_status);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001570 ret = -EINVAL;
1571 break;
1572 }
1573 return ret;
1574}
1575
1576/* Issue a configure endpoint command or evaluate context command
1577 * and wait for it to finish.
1578 */
1579static int xhci_configure_endpoint(struct xhci_hcd *xhci,
Sarah Sharp913a8a32009-09-04 10:53:13 -07001580 struct usb_device *udev,
1581 struct xhci_command *command,
1582 bool ctx_change, bool must_succeed)
Sarah Sharpf2217e82009-08-07 14:04:43 -07001583{
1584 int ret;
1585 int timeleft;
1586 unsigned long flags;
Sarah Sharp913a8a32009-09-04 10:53:13 -07001587 struct xhci_container_ctx *in_ctx;
1588 struct completion *cmd_completion;
Matt Evans28ccd292011-03-29 13:40:46 +11001589 u32 *cmd_status;
Sarah Sharp913a8a32009-09-04 10:53:13 -07001590 struct xhci_virt_device *virt_dev;
Sarah Sharpf2217e82009-08-07 14:04:43 -07001591
1592 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharp913a8a32009-09-04 10:53:13 -07001593 virt_dev = xhci->devs[udev->slot_id];
1594 if (command) {
1595 in_ctx = command->in_ctx;
1596 cmd_completion = command->completion;
1597 cmd_status = &command->status;
1598 command->command_trb = xhci->cmd_ring->enqueue;
Paul Zimmerman7a3783e2010-11-17 16:26:50 -08001599
1600 /* Enqueue pointer can be left pointing to the link TRB,
1601 * we must handle that
1602 */
Matt Evans28ccd292011-03-29 13:40:46 +11001603 if ((le32_to_cpu(command->command_trb->link.control)
1604 & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
Paul Zimmerman7a3783e2010-11-17 16:26:50 -08001605 command->command_trb =
1606 xhci->cmd_ring->enq_seg->next->trbs;
1607
Sarah Sharp913a8a32009-09-04 10:53:13 -07001608 list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
1609 } else {
1610 in_ctx = virt_dev->in_ctx;
1611 cmd_completion = &virt_dev->cmd_completion;
1612 cmd_status = &virt_dev->cmd_status;
1613 }
Andiry Xu1d680642010-03-12 17:10:04 +08001614 init_completion(cmd_completion);
Sarah Sharp913a8a32009-09-04 10:53:13 -07001615
Sarah Sharpf2217e82009-08-07 14:04:43 -07001616 if (!ctx_change)
Sarah Sharp913a8a32009-09-04 10:53:13 -07001617 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
1618 udev->slot_id, must_succeed);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001619 else
Sarah Sharp913a8a32009-09-04 10:53:13 -07001620 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
Sarah Sharpf2217e82009-08-07 14:04:43 -07001621 udev->slot_id);
1622 if (ret < 0) {
Sarah Sharpc01591b2009-12-09 15:58:58 -08001623 if (command)
1624 list_del(&command->cmd_list);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001625 spin_unlock_irqrestore(&xhci->lock, flags);
1626 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
1627 return -ENOMEM;
1628 }
1629 xhci_ring_cmd_db(xhci);
1630 spin_unlock_irqrestore(&xhci->lock, flags);
1631
1632 /* Wait for the configure endpoint command to complete */
1633 timeleft = wait_for_completion_interruptible_timeout(
Sarah Sharp913a8a32009-09-04 10:53:13 -07001634 cmd_completion,
Sarah Sharpf2217e82009-08-07 14:04:43 -07001635 USB_CTRL_SET_TIMEOUT);
1636 if (timeleft <= 0) {
1637 xhci_warn(xhci, "%s while waiting for %s command\n",
1638 timeleft == 0 ? "Timeout" : "Signal",
1639 ctx_change == 0 ?
1640 "configure endpoint" :
1641 "evaluate context");
1642 /* FIXME cancel the configure endpoint command */
1643 return -ETIME;
1644 }
1645
1646 if (!ctx_change)
Sarah Sharp913a8a32009-09-04 10:53:13 -07001647 return xhci_configure_endpoint_result(xhci, udev, cmd_status);
1648 return xhci_evaluate_context_result(xhci, udev, cmd_status);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001649}
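/*
 * Illustrative sketch of the link TRB check done above: when the
 * command ring's enqueue pointer rests on the link TRB at the end of a
 * segment, the TRB that will actually be written next is the first TRB
 * of the following segment.  Hypothetical helper, shown for clarity:
 */
static union xhci_trb *example_next_cmd_trb(struct xhci_ring *ring)
{
	union xhci_trb *trb = ring->enqueue;

	if ((le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK) ==
			TRB_TYPE(TRB_LINK))
		trb = ring->enq_seg->next->trbs;
	return trb;
}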
1650
Sarah Sharpf88ba782009-05-14 11:44:22 -07001651/* Called after one or more calls to xhci_add_endpoint() or
1652 * xhci_drop_endpoint(). If this call fails, the USB core is expected
1653 * to call xhci_reset_bandwidth().
1654 *
1655 * Since we are in the middle of changing either configuration or
1656 * installing a new alt setting, the USB core won't allow URBs to be
1657 * enqueued for any endpoint on the old config or interface. Nothing
1658 * else should be touching the xhci->devs[slot_id] structure, so we
1659 * don't need to take the xhci->lock for manipulating that.
1660 */
Sarah Sharpf94e01862009-04-27 19:58:38 -07001661int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1662{
1663 int i;
1664 int ret = 0;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001665 struct xhci_hcd *xhci;
1666 struct xhci_virt_device *virt_dev;
John Yound115b042009-07-27 12:05:15 -07001667 struct xhci_input_control_ctx *ctrl_ctx;
1668 struct xhci_slot_ctx *slot_ctx;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001669
Andiry Xu64927732010-10-14 07:22:45 -07001670 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001671 if (ret <= 0)
1672 return ret;
1673 xhci = hcd_to_xhci(hcd);
1674
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07001675 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001676 virt_dev = xhci->devs[udev->slot_id];
1677
1678 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
John Yound115b042009-07-27 12:05:15 -07001679 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11001680 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
1681 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
1682 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001683 xhci_dbg(xhci, "New Input Control Context:\n");
John Yound115b042009-07-27 12:05:15 -07001684 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1685 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
Matt Evans28ccd292011-03-29 13:40:46 +11001686 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001687
Sarah Sharp913a8a32009-09-04 10:53:13 -07001688 ret = xhci_configure_endpoint(xhci, udev, NULL,
1689 false, false);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001690 if (ret) {
1691 /* Callee should call reset_bandwidth() */
Sarah Sharpf94e01862009-04-27 19:58:38 -07001692 return ret;
1693 }
1694
1695 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
John Yound115b042009-07-27 12:05:15 -07001696 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
Matt Evans28ccd292011-03-29 13:40:46 +11001697 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001698
John Yound115b042009-07-27 12:05:15 -07001699 xhci_zero_in_ctx(xhci, virt_dev);
Sarah Sharp74f9fe22009-12-03 09:44:29 -08001700 /* Install new rings and free or cache any old rings */
Sarah Sharpf94e01862009-04-27 19:58:38 -07001701 for (i = 1; i < 31; ++i) {
Sarah Sharp74f9fe22009-12-03 09:44:29 -08001702 if (!virt_dev->eps[i].new_ring)
1703 continue;
1704 /* Only cache or free the old ring if it exists.
1705 * It may not if this is the first add of an endpoint.
1706 */
1707 if (virt_dev->eps[i].ring) {
Sarah Sharp412566b2009-12-09 15:59:01 -08001708 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001709 }
Sarah Sharp74f9fe22009-12-03 09:44:29 -08001710 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
1711 virt_dev->eps[i].new_ring = NULL;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001712 }
1713
Sarah Sharpf94e01862009-04-27 19:58:38 -07001714 return ret;
1715}
1716
1717void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1718{
Sarah Sharpf94e01862009-04-27 19:58:38 -07001719 struct xhci_hcd *xhci;
1720 struct xhci_virt_device *virt_dev;
1721 int i, ret;
1722
Andiry Xu64927732010-10-14 07:22:45 -07001723 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001724 if (ret <= 0)
1725 return;
1726 xhci = hcd_to_xhci(hcd);
1727
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07001728 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001729 virt_dev = xhci->devs[udev->slot_id];
1730 /* Free any rings allocated for added endpoints */
1731 for (i = 0; i < 31; ++i) {
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07001732 if (virt_dev->eps[i].new_ring) {
1733 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
1734 virt_dev->eps[i].new_ring = NULL;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001735 }
1736 }
John Yound115b042009-07-27 12:05:15 -07001737 xhci_zero_in_ctx(xhci, virt_dev);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001738}
1739
Sarah Sharp5270b952009-09-04 10:53:11 -07001740static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
Sarah Sharp913a8a32009-09-04 10:53:13 -07001741 struct xhci_container_ctx *in_ctx,
1742 struct xhci_container_ctx *out_ctx,
1743 u32 add_flags, u32 drop_flags)
Sarah Sharp5270b952009-09-04 10:53:11 -07001744{
1745 struct xhci_input_control_ctx *ctrl_ctx;
Sarah Sharp913a8a32009-09-04 10:53:13 -07001746 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11001747 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
1748 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
Sarah Sharp913a8a32009-09-04 10:53:13 -07001749 xhci_slot_copy(xhci, in_ctx, out_ctx);
Matt Evans28ccd292011-03-29 13:40:46 +11001750 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
Sarah Sharp5270b952009-09-04 10:53:11 -07001751
Sarah Sharp913a8a32009-09-04 10:53:13 -07001752 xhci_dbg(xhci, "Input Context:\n");
1753 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
Sarah Sharp5270b952009-09-04 10:53:11 -07001754}
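/*
 * Example call, with made-up values: to ask the xHC to reconfigure the
 * endpoints at driver indices 1 and 2, a caller passes matching add and
 * drop masks (see the hypothetical example_ep_flag() sketch earlier);
 * SLOT_FLAG is OR'ed into add_flags by the function itself.
 *
 *	u32 mask = example_ep_flag(1) | example_ep_flag(2);
 *	xhci_setup_input_ctx_for_config_ep(xhci, in_ctx, out_ctx,
 *			mask, mask);
 */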
1755
Dmitry Torokhov8212a492011-02-08 13:55:59 -08001756static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07001757 unsigned int slot_id, unsigned int ep_index,
1758 struct xhci_dequeue_state *deq_state)
1759{
1760 struct xhci_container_ctx *in_ctx;
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07001761 struct xhci_ep_ctx *ep_ctx;
1762 u32 added_ctxs;
1763 dma_addr_t addr;
1764
Sarah Sharp913a8a32009-09-04 10:53:13 -07001765 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1766 xhci->devs[slot_id]->out_ctx, ep_index);
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07001767 in_ctx = xhci->devs[slot_id]->in_ctx;
1768 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1769 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
1770 deq_state->new_deq_ptr);
1771 if (addr == 0) {
1772 xhci_warn(xhci, "WARN Cannot submit config ep after "
1773 "reset ep command\n");
1774 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
1775 deq_state->new_deq_seg,
1776 deq_state->new_deq_ptr);
1777 return;
1778 }
Matt Evans28ccd292011-03-29 13:40:46 +11001779 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07001780
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07001781 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
Sarah Sharp913a8a32009-09-04 10:53:13 -07001782 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
1783 xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07001784}
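/*
 * Illustrative note: the dequeue pointer written above packs the ring
 * cycle state into bit 0 of the 64-bit TRB address; TRBs are 16 bytes
 * each and segments are aligned, so the low bits of a TRB address are
 * always free.  A minimal sketch of the encoding, hypothetical name:
 */
static inline u64 example_encode_deq(dma_addr_t trb_dma, u32 cycle_state)
{
	return (u64)trb_dma | cycle_state;
}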
1785
Sarah Sharp82d10092009-08-07 14:04:52 -07001786void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07001787 struct usb_device *udev, unsigned int ep_index)
Sarah Sharp82d10092009-08-07 14:04:52 -07001788{
1789 struct xhci_dequeue_state deq_state;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07001790 struct xhci_virt_ep *ep;
Sarah Sharp82d10092009-08-07 14:04:52 -07001791
1792 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07001793 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
Sarah Sharp82d10092009-08-07 14:04:52 -07001794 /* We need to move the HW's dequeue pointer past this TD,
1795 * or it will attempt to resend it on the next doorbell ring.
1796 */
1797 xhci_find_new_dequeue_state(xhci, udev->slot_id,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07001798 ep_index, ep->stopped_stream, ep->stopped_td,
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07001799 &deq_state);
Sarah Sharp82d10092009-08-07 14:04:52 -07001800
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07001801 /* HW with the reset endpoint quirk will use the saved dequeue state to
1802 * issue a configure endpoint command later.
1803 */
1804 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
1805 xhci_dbg(xhci, "Queueing new dequeue state\n");
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07001806 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
Sarah Sharpe9df17e2010-04-02 15:34:43 -07001807 ep_index, ep->stopped_stream, &deq_state);
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07001808 } else {
1809 /* Better hope no one uses the input context between now and the
1810 * reset endpoint completion!
Sarah Sharpe9df17e2010-04-02 15:34:43 -07001811 * XXX: No idea how this hardware will react when stream rings
1812 * are enabled.
Sarah Sharpac9d8fe2009-08-07 14:04:55 -07001813 */
1814 xhci_dbg(xhci, "Setting up input context for "
1815 "configure endpoint command\n");
1816 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
1817 ep_index, &deq_state);
1818 }
Sarah Sharp82d10092009-08-07 14:04:52 -07001819}
1820
Sarah Sharpa1587d92009-07-27 12:03:15 -07001821/* Deal with stalled endpoints. The core should have sent the control message
1822 * to clear the halt condition. However, we need to make the xHCI hardware
1823 * reset its sequence number, since a device will expect a sequence number of
1824 * zero after the halt condition is cleared.
1825 * Context: in_interrupt
1826 */
1827void xhci_endpoint_reset(struct usb_hcd *hcd,
1828 struct usb_host_endpoint *ep)
1829{
1830 struct xhci_hcd *xhci;
1831 struct usb_device *udev;
1832 unsigned int ep_index;
1833 unsigned long flags;
1834 int ret;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07001835 struct xhci_virt_ep *virt_ep;
Sarah Sharpa1587d92009-07-27 12:03:15 -07001836
1837 xhci = hcd_to_xhci(hcd);
1838 udev = (struct usb_device *) ep->hcpriv;
1839 /* Called with a root hub endpoint (or an endpoint that wasn't added
 1840	 * with xhci_add_endpoint()).
1841 */
1842 if (!ep->hcpriv)
1843 return;
1844 ep_index = xhci_get_endpoint_index(&ep->desc);
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07001845 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
1846 if (!virt_ep->stopped_td) {
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07001847 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
1848 ep->desc.bEndpointAddress);
1849 return;
1850 }
Sarah Sharp82d10092009-08-07 14:04:52 -07001851 if (usb_endpoint_xfer_control(&ep->desc)) {
1852 xhci_dbg(xhci, "Control endpoint stall already handled.\n");
1853 return;
1854 }
Sarah Sharpa1587d92009-07-27 12:03:15 -07001855
1856 xhci_dbg(xhci, "Queueing reset endpoint command\n");
1857 spin_lock_irqsave(&xhci->lock, flags);
1858 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
Sarah Sharpc92bcfa2009-07-27 12:05:21 -07001859 /*
1860 * Can't change the ring dequeue pointer until it's transitioned to the
1861 * stopped state, which is only upon a successful reset endpoint
1862 * command. Better hope that last command worked!
1863 */
Sarah Sharpa1587d92009-07-27 12:03:15 -07001864 if (!ret) {
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07001865 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
1866 kfree(virt_ep->stopped_td);
Sarah Sharpa1587d92009-07-27 12:03:15 -07001867 xhci_ring_cmd_db(xhci);
1868 }
Sarah Sharp1624ae12010-05-06 13:40:08 -07001869 virt_ep->stopped_td = NULL;
1870 virt_ep->stopped_trb = NULL;
Sarah Sharp5e5cf6f2010-05-06 13:40:18 -07001871 virt_ep->stopped_stream = 0;
Sarah Sharpa1587d92009-07-27 12:03:15 -07001872 spin_unlock_irqrestore(&xhci->lock, flags);
1873
1874 if (ret)
1875 xhci_warn(xhci, "FIXME allocate a new ring segment\n");
1876}
1877
Sarah Sharp8df75f42010-04-02 15:34:16 -07001878static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
1879 struct usb_device *udev, struct usb_host_endpoint *ep,
1880 unsigned int slot_id)
1881{
1882 int ret;
1883 unsigned int ep_index;
1884 unsigned int ep_state;
1885
1886 if (!ep)
1887 return -EINVAL;
Andiry Xu64927732010-10-14 07:22:45 -07001888 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
Sarah Sharp8df75f42010-04-02 15:34:16 -07001889 if (ret <= 0)
1890 return -EINVAL;
Alan Stern842f1692010-04-30 12:44:46 -04001891 if (ep->ss_ep_comp.bmAttributes == 0) {
Sarah Sharp8df75f42010-04-02 15:34:16 -07001892 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
1893 " descriptor for ep 0x%x does not support streams\n",
1894 ep->desc.bEndpointAddress);
1895 return -EINVAL;
1896 }
1897
1898 ep_index = xhci_get_endpoint_index(&ep->desc);
1899 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1900 if (ep_state & EP_HAS_STREAMS ||
1901 ep_state & EP_GETTING_STREAMS) {
1902 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
1903 "already has streams set up.\n",
1904 ep->desc.bEndpointAddress);
1905 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
1906 "dynamic stream context array reallocation.\n");
1907 return -EINVAL;
1908 }
1909 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
1910 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
1911 "endpoint 0x%x; URBs are pending.\n",
1912 ep->desc.bEndpointAddress);
1913 return -EINVAL;
1914 }
1915 return 0;
1916}
1917
1918static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
1919 unsigned int *num_streams, unsigned int *num_stream_ctxs)
1920{
1921 unsigned int max_streams;
1922
1923 /* The stream context array size must be a power of two */
1924 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
1925 /*
1926 * Find out how many primary stream array entries the host controller
1927 * supports. Later we may use secondary stream arrays (similar to 2nd
1928 * level page entries), but that's an optional feature for xHCI host
1929 * controllers. xHCs must support at least 4 stream IDs.
1930 */
1931 max_streams = HCC_MAX_PSA(xhci->hcc_params);
1932 if (*num_stream_ctxs > max_streams) {
1933 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
1934 max_streams);
1935 *num_stream_ctxs = max_streams;
1936 *num_streams = max_streams;
1937 }
1938}
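/*
 * Worked example with illustrative numbers: a request for 13 stream IDs
 * gets a context array rounded up to the next power of two, 16.  If
 * HCC_MAX_PSA reports a smaller limit (the minimum an xHC may advertise
 * is 4), both the array size and the usable stream count are clamped to
 * that limit.
 */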
1939
 1940/* Returns an error code if one of the endpoints already has streams.
1941 * This does not change any data structures, it only checks and gathers
1942 * information.
1943 */
1944static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
1945 struct usb_device *udev,
1946 struct usb_host_endpoint **eps, unsigned int num_eps,
1947 unsigned int *num_streams, u32 *changed_ep_bitmask)
1948{
Sarah Sharp8df75f42010-04-02 15:34:16 -07001949 unsigned int max_streams;
1950 unsigned int endpoint_flag;
1951 int i;
1952 int ret;
1953
1954 for (i = 0; i < num_eps; i++) {
1955 ret = xhci_check_streams_endpoint(xhci, udev,
1956 eps[i], udev->slot_id);
1957 if (ret < 0)
1958 return ret;
1959
Alan Stern842f1692010-04-30 12:44:46 -04001960 max_streams = USB_SS_MAX_STREAMS(
1961 eps[i]->ss_ep_comp.bmAttributes);
Sarah Sharp8df75f42010-04-02 15:34:16 -07001962 if (max_streams < (*num_streams - 1)) {
1963 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
1964 eps[i]->desc.bEndpointAddress,
1965 max_streams);
1966 *num_streams = max_streams+1;
1967 }
1968
1969 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
1970 if (*changed_ep_bitmask & endpoint_flag)
1971 return -EINVAL;
1972 *changed_ep_bitmask |= endpoint_flag;
1973 }
1974 return 0;
1975}
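/*
 * Worked example, assuming the USB 3.0 rule that the companion
 * descriptor's MaxStreams field is a power-of-two exponent: a bulk
 * endpoint reporting MaxStreams = 4 supports 2^4 = 16 stream IDs, so a
 * request for 32 usable streams (*num_streams == 33 counting stream 0)
 * is trimmed here to max_streams + 1 == 17.
 */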
1976
1977static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
1978 struct usb_device *udev,
1979 struct usb_host_endpoint **eps, unsigned int num_eps)
1980{
1981 u32 changed_ep_bitmask = 0;
1982 unsigned int slot_id;
1983 unsigned int ep_index;
1984 unsigned int ep_state;
1985 int i;
1986
1987 slot_id = udev->slot_id;
1988 if (!xhci->devs[slot_id])
1989 return 0;
1990
1991 for (i = 0; i < num_eps; i++) {
1992 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1993 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1994 /* Are streams already being freed for the endpoint? */
1995 if (ep_state & EP_GETTING_NO_STREAMS) {
1996 xhci_warn(xhci, "WARN Can't disable streams for "
1997 "endpoint 0x%x\n, "
1998 "streams are being disabled already.",
1999 eps[i]->desc.bEndpointAddress);
2000 return 0;
2001 }
2002 /* Are there actually any streams to free? */
2003 if (!(ep_state & EP_HAS_STREAMS) &&
2004 !(ep_state & EP_GETTING_STREAMS)) {
2005 xhci_warn(xhci, "WARN Can't disable streams for "
2006 "endpoint 0x%x\n, "
2007 "streams are already disabled!",
2008 eps[i]->desc.bEndpointAddress);
2009 xhci_warn(xhci, "WARN xhci_free_streams() called "
2010 "with non-streams endpoint\n");
2011 return 0;
2012 }
2013 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
2014 }
2015 return changed_ep_bitmask;
2016}
2017
2018/*
 2019 * The USB device drivers use this function (through the HCD interface in USB
2020 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
2021 * coordinate mass storage command queueing across multiple endpoints (basically
2022 * a stream ID == a task ID).
2023 *
2024 * Setting up streams involves allocating the same size stream context array
2025 * for each endpoint and issuing a configure endpoint command for all endpoints.
2026 *
2027 * Don't allow the call to succeed if one endpoint only supports one stream
2028 * (which means it doesn't support streams at all).
2029 *
2030 * Drivers may get less stream IDs than they asked for, if the host controller
2031 * hardware or endpoints claim they can't support the number of requested
2032 * stream IDs.
2033 */
2034int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
2035 struct usb_host_endpoint **eps, unsigned int num_eps,
2036 unsigned int num_streams, gfp_t mem_flags)
2037{
2038 int i, ret;
2039 struct xhci_hcd *xhci;
2040 struct xhci_virt_device *vdev;
2041 struct xhci_command *config_cmd;
2042 unsigned int ep_index;
2043 unsigned int num_stream_ctxs;
2044 unsigned long flags;
2045 u32 changed_ep_bitmask = 0;
2046
2047 if (!eps)
2048 return -EINVAL;
2049
2050 /* Add one to the number of streams requested to account for
2051 * stream 0 that is reserved for xHCI usage.
2052 */
2053 num_streams += 1;
2054 xhci = hcd_to_xhci(hcd);
2055 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
2056 num_streams);
2057
2058 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
2059 if (!config_cmd) {
2060 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
2061 return -ENOMEM;
2062 }
2063
2064 /* Check to make sure all endpoints are not already configured for
2065 * streams. While we're at it, find the maximum number of streams that
2066 * all the endpoints will support and check for duplicate endpoints.
2067 */
2068 spin_lock_irqsave(&xhci->lock, flags);
2069 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
2070 num_eps, &num_streams, &changed_ep_bitmask);
2071 if (ret < 0) {
2072 xhci_free_command(xhci, config_cmd);
2073 spin_unlock_irqrestore(&xhci->lock, flags);
2074 return ret;
2075 }
2076 if (num_streams <= 1) {
2077 xhci_warn(xhci, "WARN: endpoints can't handle "
2078 "more than one stream.\n");
2079 xhci_free_command(xhci, config_cmd);
2080 spin_unlock_irqrestore(&xhci->lock, flags);
2081 return -EINVAL;
2082 }
2083 vdev = xhci->devs[udev->slot_id];
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002084 /* Mark each endpoint as being in transition, so
Sarah Sharp8df75f42010-04-02 15:34:16 -07002085 * xhci_urb_enqueue() will reject all URBs.
2086 */
2087 for (i = 0; i < num_eps; i++) {
2088 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2089 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
2090 }
2091 spin_unlock_irqrestore(&xhci->lock, flags);
2092
2093 /* Setup internal data structures and allocate HW data structures for
2094 * streams (but don't install the HW structures in the input context
2095 * until we're sure all memory allocation succeeded).
2096 */
2097 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
2098 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
2099 num_stream_ctxs, num_streams);
2100
2101 for (i = 0; i < num_eps; i++) {
2102 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2103 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
2104 num_stream_ctxs,
2105 num_streams, mem_flags);
2106 if (!vdev->eps[ep_index].stream_info)
2107 goto cleanup;
2108 /* Set maxPstreams in endpoint context and update deq ptr to
2109 * point to stream context array. FIXME
2110 */
2111 }
2112
2113 /* Set up the input context for a configure endpoint command. */
2114 for (i = 0; i < num_eps; i++) {
2115 struct xhci_ep_ctx *ep_ctx;
2116
2117 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2118 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
2119
2120 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
2121 vdev->out_ctx, ep_index);
2122 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
2123 vdev->eps[ep_index].stream_info);
2124 }
2125 /* Tell the HW to drop its old copy of the endpoint context info
2126 * and add the updated copy from the input context.
2127 */
2128 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
2129 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
2130
2131 /* Issue and wait for the configure endpoint command */
2132 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
2133 false, false);
2134
2135 /* xHC rejected the configure endpoint command for some reason, so we
2136 * leave the old ring intact and free our internal streams data
2137 * structure.
2138 */
2139 if (ret < 0)
2140 goto cleanup;
2141
2142 spin_lock_irqsave(&xhci->lock, flags);
2143 for (i = 0; i < num_eps; i++) {
2144 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2145 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
2146 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
2147 udev->slot_id, ep_index);
2148 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
2149 }
2150 xhci_free_command(xhci, config_cmd);
2151 spin_unlock_irqrestore(&xhci->lock, flags);
2152
2153 /* Subtract 1 for stream 0, which drivers can't use */
2154 return num_streams - 1;
2155
2156cleanup:
2157 /* If it didn't work, free the streams! */
2158 for (i = 0; i < num_eps; i++) {
2159 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2160 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
Sarah Sharp8a007742010-04-30 15:37:56 -07002161 vdev->eps[ep_index].stream_info = NULL;
Sarah Sharp8df75f42010-04-02 15:34:16 -07002162 /* FIXME Unset maxPstreams in endpoint context and
 2163	 * update deq ptr to point to the normal endpoint ring.
2164 */
2165 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
2166 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
2167 xhci_endpoint_zero(xhci, vdev, eps[i]);
2168 }
2169 xhci_free_command(xhci, config_cmd);
2170 return -ENOMEM;
2171}
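/*
 * Illustrative caller sketch, assuming the usb_alloc_streams() and
 * usb_free_streams() wrappers in the USB core, which reach this code
 * through the HCD ops; the endpoint array and counts are made-up
 * example values:
 *
 *	struct usb_host_endpoint *eps[2] = { ep_in, ep_out };
 *	int num_ids = usb_alloc_streams(intf, eps, 2, 16, GFP_NOIO);
 *	if (num_ids < 0)
 *		return num_ids;	(streams unsupported or setup failed)
 *	...
 *	usb_free_streams(intf, eps, 2, GFP_NOIO);
 */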
2172
2173/* Transition the endpoint from using streams to being a "normal" endpoint
2174 * without streams.
2175 *
2176 * Modify the endpoint context state, submit a configure endpoint command,
2177 * and free all endpoint rings for streams if that completes successfully.
2178 */
2179int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
2180 struct usb_host_endpoint **eps, unsigned int num_eps,
2181 gfp_t mem_flags)
2182{
2183 int i, ret;
2184 struct xhci_hcd *xhci;
2185 struct xhci_virt_device *vdev;
2186 struct xhci_command *command;
2187 unsigned int ep_index;
2188 unsigned long flags;
2189 u32 changed_ep_bitmask;
2190
2191 xhci = hcd_to_xhci(hcd);
2192 vdev = xhci->devs[udev->slot_id];
2193
2194 /* Set up a configure endpoint command to remove the streams rings */
2195 spin_lock_irqsave(&xhci->lock, flags);
2196 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
2197 udev, eps, num_eps);
2198 if (changed_ep_bitmask == 0) {
2199 spin_unlock_irqrestore(&xhci->lock, flags);
2200 return -EINVAL;
2201 }
2202
2203 /* Use the xhci_command structure from the first endpoint. We may have
2204 * allocated too many, but the driver may call xhci_free_streams() for
2205 * each endpoint it grouped into one call to xhci_alloc_streams().
2206 */
2207 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
2208 command = vdev->eps[ep_index].stream_info->free_streams_command;
2209 for (i = 0; i < num_eps; i++) {
2210 struct xhci_ep_ctx *ep_ctx;
2211
2212 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2213 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
2214 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
2215 EP_GETTING_NO_STREAMS;
2216
2217 xhci_endpoint_copy(xhci, command->in_ctx,
2218 vdev->out_ctx, ep_index);
2219 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
2220 &vdev->eps[ep_index]);
2221 }
2222 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
2223 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
2224 spin_unlock_irqrestore(&xhci->lock, flags);
2225
2226 /* Issue and wait for the configure endpoint command,
2227 * which must succeed.
2228 */
2229 ret = xhci_configure_endpoint(xhci, udev, command,
2230 false, true);
2231
2232 /* xHC rejected the configure endpoint command for some reason, so we
2233 * leave the streams rings intact.
2234 */
2235 if (ret < 0)
2236 return ret;
2237
2238 spin_lock_irqsave(&xhci->lock, flags);
2239 for (i = 0; i < num_eps; i++) {
2240 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2241 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
Sarah Sharp8a007742010-04-30 15:37:56 -07002242 vdev->eps[ep_index].stream_info = NULL;
Sarah Sharp8df75f42010-04-02 15:34:16 -07002243 /* FIXME Unset maxPstreams in endpoint context and
 2244	 * update deq ptr to point to the normal endpoint ring.
2245 */
2246 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
2247 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
2248 }
2249 spin_unlock_irqrestore(&xhci->lock, flags);
2250
2251 return 0;
2252}
2253
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002254/*
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08002255 * This submits a Reset Device Command, which will set the device state to 0,
2256 * set the device address to 0, and disable all the endpoints except the default
2257 * control endpoint. The USB core should come back and call
2258 * xhci_address_device(), and then re-set up the configuration. If this is
2259 * called because of a usb_reset_and_verify_device(), then the old alternate
2260 * settings will be re-installed through the normal bandwidth allocation
2261 * functions.
2262 *
2263 * Wait for the Reset Device command to finish. Remove all structures
2264 * associated with the endpoints that were disabled. Clear the input device
2265 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
Andiry Xuf0615c42010-10-14 07:22:48 -07002266 *
2267 * If the virt_dev to be reset does not exist or does not match the udev,
2268 * it means the device is lost, possibly due to the xHC restore error and
2269 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
2270 * re-allocate the device.
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08002271 */
Andiry Xuf0615c42010-10-14 07:22:48 -07002272int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08002273{
2274 int ret, i;
2275 unsigned long flags;
2276 struct xhci_hcd *xhci;
2277 unsigned int slot_id;
2278 struct xhci_virt_device *virt_dev;
2279 struct xhci_command *reset_device_cmd;
2280 int timeleft;
2281 int last_freed_endpoint;
2282
Andiry Xuf0615c42010-10-14 07:22:48 -07002283 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08002284 if (ret <= 0)
2285 return ret;
2286 xhci = hcd_to_xhci(hcd);
2287 slot_id = udev->slot_id;
2288 virt_dev = xhci->devs[slot_id];
Andiry Xuf0615c42010-10-14 07:22:48 -07002289 if (!virt_dev) {
2290 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
2291 "not exist. Re-allocate the device\n", slot_id);
2292 ret = xhci_alloc_dev(hcd, udev);
2293 if (ret == 1)
2294 return 0;
2295 else
2296 return -EINVAL;
2297 }
2298
2299 if (virt_dev->udev != udev) {
 2300		/* If the virt_dev and the udev do not match, this virt_dev
2301 * may belong to another udev.
2302 * Re-allocate the device.
2303 */
2304 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
2305 "not match the udev. Re-allocate the device\n",
2306 slot_id);
2307 ret = xhci_alloc_dev(hcd, udev);
2308 if (ret == 1)
2309 return 0;
2310 else
2311 return -EINVAL;
2312 }
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08002313
2314 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
2315 /* Allocate the command structure that holds the struct completion.
2316 * Assume we're in process context, since the normal device reset
2317 * process has to wait for the device anyway. Storage devices are
2318 * reset as part of error handling, so use GFP_NOIO instead of
2319 * GFP_KERNEL.
2320 */
2321 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
2322 if (!reset_device_cmd) {
2323 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
2324 return -ENOMEM;
2325 }
2326
2327 /* Attempt to submit the Reset Device command to the command ring */
2328 spin_lock_irqsave(&xhci->lock, flags);
2329 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
Paul Zimmerman7a3783e2010-11-17 16:26:50 -08002330
2331 /* Enqueue pointer can be left pointing to the link TRB,
2332 * we must handle that
2333 */
Matt Evans28ccd292011-03-29 13:40:46 +11002334 if ((le32_to_cpu(reset_device_cmd->command_trb->link.control)
2335 & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
Paul Zimmerman7a3783e2010-11-17 16:26:50 -08002336 reset_device_cmd->command_trb =
2337 xhci->cmd_ring->enq_seg->next->trbs;
2338
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08002339 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
2340 ret = xhci_queue_reset_device(xhci, slot_id);
2341 if (ret) {
2342 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2343 list_del(&reset_device_cmd->cmd_list);
2344 spin_unlock_irqrestore(&xhci->lock, flags);
2345 goto command_cleanup;
2346 }
2347 xhci_ring_cmd_db(xhci);
2348 spin_unlock_irqrestore(&xhci->lock, flags);
2349
2350 /* Wait for the Reset Device command to finish */
2351 timeleft = wait_for_completion_interruptible_timeout(
2352 reset_device_cmd->completion,
2353 USB_CTRL_SET_TIMEOUT);
2354 if (timeleft <= 0) {
2355 xhci_warn(xhci, "%s while waiting for reset device command\n",
2356 timeleft == 0 ? "Timeout" : "Signal");
2357 spin_lock_irqsave(&xhci->lock, flags);
2358 /* The timeout might have raced with the event ring handler, so
2359 * only delete from the list if the item isn't poisoned.
2360 */
2361 if (reset_device_cmd->cmd_list.next != LIST_POISON1)
2362 list_del(&reset_device_cmd->cmd_list);
2363 spin_unlock_irqrestore(&xhci->lock, flags);
2364 ret = -ETIME;
2365 goto command_cleanup;
2366 }
2367
2368 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
2369 * unless we tried to reset a slot ID that wasn't enabled,
2370 * or the device wasn't in the addressed or configured state.
2371 */
2372 ret = reset_device_cmd->status;
2373 switch (ret) {
2374 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
2375 case COMP_CTX_STATE: /* 0.96 completion code for same thing */
2376 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
2377 slot_id,
2378 xhci_get_slot_state(xhci, virt_dev->out_ctx));
2379 xhci_info(xhci, "Not freeing device rings.\n");
2380 /* Don't treat this as an error. May change my mind later. */
2381 ret = 0;
2382 goto command_cleanup;
2383 case COMP_SUCCESS:
2384 xhci_dbg(xhci, "Successful reset device command.\n");
2385 break;
2386 default:
2387 if (xhci_is_vendor_info_code(xhci, ret))
2388 break;
2389 xhci_warn(xhci, "Unknown completion code %u for "
2390 "reset device command.\n", ret);
2391 ret = -EINVAL;
2392 goto command_cleanup;
2393 }
2394
2395 /* Everything but endpoint 0 is disabled, so free or cache the rings. */
2396 last_freed_endpoint = 1;
2397 for (i = 1; i < 31; ++i) {
Dmitry Torokhov2dea75d2011-04-12 23:06:28 -07002398 struct xhci_virt_ep *ep = &virt_dev->eps[i];
2399
2400 if (ep->ep_state & EP_HAS_STREAMS) {
2401 xhci_free_stream_info(xhci, ep->stream_info);
2402 ep->stream_info = NULL;
2403 ep->ep_state &= ~EP_HAS_STREAMS;
2404 }
2405
2406 if (ep->ring) {
2407 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2408 last_freed_endpoint = i;
2409 }
Sarah Sharp2a8f82c2009-12-09 15:59:13 -08002410 }
2411 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
2412 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
2413 ret = 0;
2414
2415command_cleanup:
2416 xhci_free_command(xhci, reset_device_cmd);
2417 return ret;
2418}
2419
2420/*
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002421 * At this point, the struct usb_device is about to go away, the device has
2422 * disconnected, and all traffic has been stopped and the endpoints have been
2423 * disabled. Free any HC data structures associated with that device.
2424 */
2425void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
2426{
2427 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
Sarah Sharp6f5165c2009-10-27 10:57:01 -07002428 struct xhci_virt_device *virt_dev;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002429 unsigned long flags;
Sarah Sharpc526d0d2009-09-16 16:42:39 -07002430 u32 state;
Andiry Xu64927732010-10-14 07:22:45 -07002431 int i, ret;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002432
Andiry Xu64927732010-10-14 07:22:45 -07002433 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2434 if (ret <= 0)
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002435 return;
Andiry Xu64927732010-10-14 07:22:45 -07002436
Sarah Sharp6f5165c2009-10-27 10:57:01 -07002437 virt_dev = xhci->devs[udev->slot_id];
Sarah Sharp6f5165c2009-10-27 10:57:01 -07002438
2439 /* Stop any wayward timer functions (which may grab the lock) */
2440 for (i = 0; i < 31; ++i) {
2441 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
2442 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
2443 }
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002444
2445 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharpc526d0d2009-09-16 16:42:39 -07002446 /* Don't disable the slot if the host controller is dead. */
2447 state = xhci_readl(xhci, &xhci->op_regs->status);
Sarah Sharp6f5165c2009-10-27 10:57:01 -07002448 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
Sarah Sharpc526d0d2009-09-16 16:42:39 -07002449 xhci_free_virt_device(xhci, udev->slot_id);
2450 spin_unlock_irqrestore(&xhci->lock, flags);
2451 return;
2452 }
2453
Sarah Sharp23e3be12009-04-29 19:05:20 -07002454 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002455 spin_unlock_irqrestore(&xhci->lock, flags);
2456 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2457 return;
2458 }
Sarah Sharp23e3be12009-04-29 19:05:20 -07002459 xhci_ring_cmd_db(xhci);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002460 spin_unlock_irqrestore(&xhci->lock, flags);
2461 /*
2462 * Event command completion handler will free any data structures
Sarah Sharpf88ba782009-05-14 11:44:22 -07002463 * associated with the slot. XXX Can free sleep?
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002464 */
2465}
2466
2467/*
2468 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
2469 * timed out, or allocating memory failed. Returns 1 on success.
2470 */
2471int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2472{
2473 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2474 unsigned long flags;
2475 int timeleft;
2476 int ret;
2477
2478 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharp23e3be12009-04-29 19:05:20 -07002479 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002480 if (ret) {
2481 spin_unlock_irqrestore(&xhci->lock, flags);
2482 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2483 return 0;
2484 }
Sarah Sharp23e3be12009-04-29 19:05:20 -07002485 xhci_ring_cmd_db(xhci);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002486 spin_unlock_irqrestore(&xhci->lock, flags);
2487
2488 /* XXX: how much time for xHC slot assignment? */
2489 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
2490 USB_CTRL_SET_TIMEOUT);
2491 if (timeleft <= 0) {
2492 xhci_warn(xhci, "%s while waiting for a slot\n",
2493 timeleft == 0 ? "Timeout" : "Signal");
2494 /* FIXME cancel the enable slot request */
2495 return 0;
2496 }
2497
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002498 if (!xhci->slot_id) {
2499 xhci_err(xhci, "Error while assigning device slot ID\n");
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002500 return 0;
2501 }
Sarah Sharpa6d940d2010-12-28 13:08:42 -08002502 /* xhci_alloc_virt_device() does not touch rings; no need to lock.
2503 * Use GFP_NOIO, since this function can be called from
2504 * xhci_discover_or_reset_device(), which may be called as part of
2505 * mass storage driver error handling.
2506 */
2507 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002508 /* Disable slot, if we can do it without mem alloc */
2509 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
Sarah Sharpf88ba782009-05-14 11:44:22 -07002510 spin_lock_irqsave(&xhci->lock, flags);
Sarah Sharp23e3be12009-04-29 19:05:20 -07002511 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
2512 xhci_ring_cmd_db(xhci);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002513 spin_unlock_irqrestore(&xhci->lock, flags);
2514 return 0;
2515 }
2516 udev->slot_id = xhci->slot_id;
2517 /* Is this a LS or FS device under a HS hub? */
 2518	/* Hub or peripheral? */
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002519 return 1;
2520}
2521
2522/*
2523 * Issue an Address Device command (which will issue a SetAddress request to
2524 * the device).
2525 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
2526 * we should only issue and wait on one address command at the same time.
2527 *
2528 * We add one to the device address issued by the hardware because the USB core
2529 * uses address 1 for the root hubs (even though they're not really devices).
2530 */
2531int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2532{
2533 unsigned long flags;
2534 int timeleft;
2535 struct xhci_virt_device *virt_dev;
2536 int ret = 0;
2537 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
John Yound115b042009-07-27 12:05:15 -07002538 struct xhci_slot_ctx *slot_ctx;
2539 struct xhci_input_control_ctx *ctrl_ctx;
Sarah Sharp8e595a52009-07-27 12:03:31 -07002540 u64 temp_64;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002541
2542 if (!udev->slot_id) {
2543 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
2544 return -EINVAL;
2545 }
2546
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002547 virt_dev = xhci->devs[udev->slot_id];
2548
Andiry Xuf0615c42010-10-14 07:22:48 -07002549 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2550 /*
2551 * If this is the first Set Address since device plug-in or
 2552	 * virt_device reallocation after a resume with an xHCI power loss,
2553 * then set up the slot context.
2554 */
2555 if (!slot_ctx->dev_info)
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002556 xhci_setup_addressable_virt_dev(xhci, udev);
Andiry Xuf0615c42010-10-14 07:22:48 -07002557 /* Otherwise, update the control endpoint ring enqueue pointer. */
Sarah Sharp2d1ee592010-07-09 17:08:54 +02002558 else
2559 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
Sarah Sharp66e49d82009-07-27 12:03:46 -07002560 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
John Yound115b042009-07-27 12:05:15 -07002561 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002562
Sarah Sharpf88ba782009-05-14 11:44:22 -07002563 spin_lock_irqsave(&xhci->lock, flags);
John Yound115b042009-07-27 12:05:15 -07002564 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
2565 udev->slot_id);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002566 if (ret) {
2567 spin_unlock_irqrestore(&xhci->lock, flags);
2568 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2569 return ret;
2570 }
Sarah Sharp23e3be12009-04-29 19:05:20 -07002571 xhci_ring_cmd_db(xhci);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002572 spin_unlock_irqrestore(&xhci->lock, flags);
2573
2574 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
2575 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
2576 USB_CTRL_SET_TIMEOUT);
2577 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
2578 * the SetAddress() "recovery interval" required by USB and aborting the
 2579	 * command on a timeout."
2580 */
2581 if (timeleft <= 0) {
 2582		xhci_warn(xhci, "%s while waiting for address device command\n",
2583 timeleft == 0 ? "Timeout" : "Signal");
2584 /* FIXME cancel the address device command */
2585 return -ETIME;
2586 }

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	/* Devices keep their kernel-assigned address; store the xHC-assigned
	 * address locally. */
	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
			+ 1;
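	/*
	 * Worked example with hypothetical values: if the xHC reports a
	 * dev_state whose address field is 3, DEV_ADDR_MASK extracts 3 and
	 * we record 4, keeping address 1 reserved for the roothub in the
	 * kernel's numbering.
	 */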
	/* Zero the input context control for later use */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);

	return 0;
}

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 */
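		/*
		 * Worked example with a hypothetical hub: usbcore reports
		 * think_time in ns (666 ns per 8 FS bit times), so a hub
		 * whose TT needs 16 bit times reports 1332 ns, and
		 * (1332 / 666) - 1 = 1, the encoding for 16 FS bit times.
		 */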
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		slot_ctx->tt_info |= cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* The MFINDEX register counts in 125 us microframes; shift right
	 * by three to convert to 1 ms frame numbers.  EHCI mods by the
	 * periodic size.  Why?
	 */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}
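
/*
 * A minimal sketch, assuming the usual HCD glue pattern, of how the
 * functions above get wired up.  The real hookup lives in the bus glue
 * (e.g. the PCI glue's struct hc_driver); the struct name below is
 * hypothetical, only the hc_driver field names are real:
 *
 *	static const struct hc_driver example_xhci_hc_driver = {
 *		...
 *		.address_device		= xhci_address_device,
 *		.update_hub_device	= xhci_update_hub_device,
 *		.get_frame_number	= xhci_get_frame,
 *		...
 *	};
 */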

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
#ifdef CONFIG_PCI
	int retval = 0;

	retval = xhci_register_pci();

	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}
#endif
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
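	/* e.g. the doorbell array is 256 32-bit registers: 256*32/8 = 1024
	 * bytes; the remaining checks below read the same way.
	 */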
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	return 0;
}
module_init(xhci_hcd_init);

static void __exit xhci_hcd_cleanup(void)
{
#ifdef CONFIG_PCI
	xhci_unregister_pci();
#endif
}
module_exit(xhci_hcd_cleanup);