/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>

#include "xhci.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t	dma;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long)seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs, bool isoc)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
				(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long)prev->dma,
			(unsigned long long)next->dma);
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring)
		return;
	if (ring->first_seg) {
		first_seg = ring->first_seg;
		seg = first_seg->next;
		xhci_dbg(xhci, "Freeing ring at %p\n", ring);
		while (seg != first_seg) {
			struct xhci_segment *next = seg->next;
			xhci_segment_free(xhci, seg);
			seg = next;
		}
		xhci_segment_free(xhci, first_seg);
		ring->first_seg = NULL;
	}
	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;
}

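/*
 * Illustration (not driver code): with cycle_state == 1, the producer hands a
 * TRB over by writing its cycle bit to 1 after filling in the other fields,
 * and the consumer only treats a TRB as valid while that TRB's cycle bit
 * matches its own CCS.  Each pass over a Link TRB that has LINK_TOGGLE set
 * inverts cycle_state, which is why a freshly zeroed ring starts with
 * PCS == CCS == 1.
 */
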
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700149/**
150 * Create a new ring with zero or more segments.
151 *
152 * Link each segment together into a ring.
153 * Set the end flag and the cycle toggle bit on the last segment.
154 * See section 4.9.1 and figures 15 and 16.
155 */
156static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
Andiry Xu7e393a82011-09-23 14:19:54 -0700157 unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700158{
159 struct xhci_ring *ring;
160 struct xhci_segment *prev;
161
162 ring = kzalloc(sizeof *(ring), flags);
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -0700163 xhci_dbg(xhci, "Allocating ring at %p\n", ring);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700164 if (!ring)
Randy Dunlap326b4812010-04-19 08:53:50 -0700165 return NULL;
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700166
Sarah Sharpd0e96f52009-04-27 19:58:01 -0700167 INIT_LIST_HEAD(&ring->td_list);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700168 if (num_segs == 0)
169 return ring;
170
171 ring->first_seg = xhci_segment_alloc(xhci, flags);
172 if (!ring->first_seg)
173 goto fail;
174 num_segs--;
175
176 prev = ring->first_seg;
177 while (num_segs > 0) {
178 struct xhci_segment *next;
179
180 next = xhci_segment_alloc(xhci, flags);
181 if (!next)
182 goto fail;
Andiry Xu7e393a82011-09-23 14:19:54 -0700183 xhci_link_segments(xhci, prev, next, link_trbs, isoc);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700184
185 prev = next;
186 num_segs--;
187 }
Andiry Xu7e393a82011-09-23 14:19:54 -0700188 xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700189
190 if (link_trbs) {
191 /* See section 4.9.2.1 and 6.4.4.1 */
Matt Evansf5960b62011-06-01 10:22:55 +1000192 prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
193 cpu_to_le32(LINK_TOGGLE);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700194 xhci_dbg(xhci, "Wrote link toggle flag to"
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -0700195 " segment %p (virtual), 0x%llx (DMA)\n",
196 prev, (unsigned long long)prev->dma);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700197 }
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800198 xhci_initialize_ring_info(ring);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700199 return ring;
200
201fail:
202 xhci_ring_free(xhci, ring);
Randy Dunlap326b4812010-04-19 08:53:50 -0700203 return NULL;
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700204}
205
Sarah Sharp412566b2009-12-09 15:59:01 -0800206void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
207 struct xhci_virt_device *virt_dev,
208 unsigned int ep_index)
209{
210 int rings_cached;
211
212 rings_cached = virt_dev->num_rings_cached;
213 if (rings_cached < XHCI_MAX_RINGS_CACHED) {
Sarah Sharp412566b2009-12-09 15:59:01 -0800214 virt_dev->ring_cache[rings_cached] =
215 virt_dev->eps[ep_index].ring;
Sarah Sharp30f89ca2011-05-16 13:09:08 -0700216 virt_dev->num_rings_cached++;
Sarah Sharp412566b2009-12-09 15:59:01 -0800217 xhci_dbg(xhci, "Cached old ring, "
218 "%d ring%s cached\n",
Sarah Sharp30f89ca2011-05-16 13:09:08 -0700219 virt_dev->num_rings_cached,
220 (virt_dev->num_rings_cached > 1) ? "s" : "");
Sarah Sharp412566b2009-12-09 15:59:01 -0800221 } else {
222 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
223 xhci_dbg(xhci, "Ring cache full (%d rings), "
224 "freeing ring\n",
225 virt_dev->num_rings_cached);
226 }
227 virt_dev->eps[ep_index].ring = NULL;
228}
229
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800230/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
231 * pointers to the beginning of the ring.
232 */
233static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
Andiry Xu7e393a82011-09-23 14:19:54 -0700234 struct xhci_ring *ring, bool isoc)
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800235{
236 struct xhci_segment *seg = ring->first_seg;
237 do {
238 memset(seg->trbs, 0,
239 sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
240 /* All endpoint rings have link TRBs */
Andiry Xu7e393a82011-09-23 14:19:54 -0700241 xhci_link_segments(xhci, seg, seg->next, 1, isoc);
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800242 seg = seg->next;
243 } while (seg != ring->first_seg);
244 xhci_initialize_ring_info(ring);
245 /* td list should be empty since all URBs have been cancelled,
246 * but just in case...
247 */
248 INIT_LIST_HEAD(&ring->td_list);
249}
250
John Yound115b042009-07-27 12:05:15 -0700251#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
252
static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	/* dma_pool_alloc() can fail; don't memset a NULL buffer */
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}

static void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
					      struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
				struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}

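/*
 * For orientation (sketch, assuming 32-byte contexts, i.e. CTX_SIZE == 32):
 *
 *   output (device) context:        input context:
 *     0x00  slot context              0x00  input control context
 *     0x20  ep context 0 (EP0)        0x20  slot context
 *     0x40  ep context 1              0x40  ep context 0 (EP0)
 *     ...                             ...
 *
 * which is why xhci_get_ep_ctx() bumps ep_index once past the slot context and
 * once more for the input control context before scaling by CTX_SIZE().
 */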

/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				stream_ctx, dma);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		return dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(&pdev->dev,
				sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
				dma, mem_flags);
	else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}

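/*
 * Note: the size thresholds above mirror xhci_free_stream_ctx().  Arrays small
 * enough for the small/medium dma_pools come from those pools; anything larger
 * than MEDIUM_STREAM_ARRAY_SIZE entries falls back to dma_alloc_coherent(), so
 * the allocation and free paths must agree on num_stream_ctxs.
 */
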
struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> SEGMENT_SHIFT);
	return ep->ring;
}

/* Only use this when you know stream_info is valid */
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static struct xhci_ring *dma_to_stream_ring(
		struct xhci_stream_info *stream_info,
		u64 address)
{
	return radix_tree_lookup(&stream_info->trb_address_map,
			address >> SEGMENT_SHIFT);
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}

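/*
 * Usage sketch: xhci_stream_id_to_ring() picks a ring when the caller already
 * knows the stream ID (e.g. from an URB), while xhci_dma_to_transfer_ring()
 * goes the other way and maps a TRB DMA address from a transfer event back to
 * the ring that contains it, via the radix tree keyed on
 * address >> SEGMENT_SHIFT.
 */
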
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static int xhci_test_radix_tree(struct xhci_hcd *xhci,
		unsigned int num_streams,
		struct xhci_stream_info *stream_info)
{
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;

	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		struct xhci_ring *mapped_ring;
		int trb_size = sizeof(union xhci_trb);

		cur_ring = stream_info->stream_rings[cur_stream];
		for (addr = cur_ring->first_seg->dma;
				addr < cur_ring->first_seg->dma + SEGMENT_SIZE;
				addr += trb_size) {
			mapped_ring = dma_to_stream_ring(stream_info, addr);
			if (cur_ring != mapped_ring) {
				xhci_warn(xhci, "WARN: DMA address 0x%08llx "
						"didn't map to stream ID %u; "
						"mapped to ring %p\n",
						(unsigned long long) addr,
						cur_stream,
						mapped_ring);
				return -EINVAL;
			}
		}
		/* One TRB after the end of the ring segment shouldn't return a
		 * pointer to the current ring (although it may be a part of a
		 * different ring).
		 */
		mapped_ring = dma_to_stream_ring(stream_info, addr);
		if (mapped_ring != cur_ring) {
			/* One TRB before should also fail */
			addr = cur_ring->first_seg->dma - trb_size;
			mapped_ring = dma_to_stream_ring(stream_info, addr);
		}
		if (mapped_ring == cur_ring) {
			xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
					"mapped to valid stream ID %u; "
					"mapped ring = %p\n",
					(unsigned long long) addr,
					cur_stream,
					mapped_ring);
			return -EINVAL;
		}
	}
	return 0;
}
#endif	/* CONFIG_USB_XHCI_HCD_DEBUGGING */

/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by device
 * drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number of
 * stream context array entries must be a power of two.
 *
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to.  We need to do this because the host controller won't tell
 * us which stream ring the TRB came from.  We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since the
 * endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses.  For example, say I
 * have segments of size 1KB, that are always 64-byte aligned.  A segment may
 * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
 * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
 * pass the radix tree a key to get the right stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key pair.  On 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32-bits),
 * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
 */
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	unsigned long key;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u "
			"stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		key = (unsigned long)
			(cur_ring->first_seg->dma >> SEGMENT_SHIFT);
		ret = radix_tree_insert(&stream_info->trb_address_map,
				key, cur_ring);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero.  This will cause the xHC to give us an
	 * error if the device asks for a stream ID we don't have setup (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */
#if XHCI_DEBUG
	/* Do a little test on the radix tree to make sure it returns the
	 * correct values.
	 */
	if (xhci_test_radix_tree(xhci, num_streams, stream_info))
		goto cleanup_rings;
#endif

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
}

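/*
 * Worked example for the MaxPStreams encoding above: with
 * num_stream_ctxs == 256, fls(256) == 9, so max_primary_streams == 7 and the
 * endpoint context advertises 2^(7 + 1) == 256 primary stream array entries,
 * matching the allocated stream context array.
 */
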
/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;
	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq  = cpu_to_le64(addr | ep->ring->cycle_state);
}

/* Frees all stream contexts associated with the endpoint,
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;
	dma_addr_t addr;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			addr = cur_ring->first_seg->dma;
			radix_tree_delete(&stream_info->trb_address_map,
					addr >> SEGMENT_SHIFT);
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	if (stream_info)
		kfree(stream_info->stream_rings);
	kfree(stream_info);
}


/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int slot_id)
{
	struct list_head *tt;
	struct list_head *tt_list_head;
	struct list_head *tt_next;
	struct xhci_tt_bw_info *tt_info;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	if (list_empty(tt_list_head))
		return;

	list_for_each(tt, tt_list_head) {
		tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
		if (tt_info->slot_id == slot_id)
			break;
	}
	/* Cautionary measure in case the hub was disconnected before we
	 * stored the TT information.
	 */
	if (tt_info->slot_id != slot_id)
		return;

	tt_next = tt->next;
	tt_info = list_entry(tt, struct xhci_tt_bw_info,
			tt_list);
	/* Multi-TT hubs will have more than one entry */
	do {
		list_del(tt);
		kfree(tt_info);
		tt = tt_next;
		if (list_empty(tt_list_head))
			break;
		tt_next = tt->next;
		tt_info = list_entry(tt, struct xhci_tt_bw_info,
				tt_list);
	} while (tt_info->slot_id == slot_id);
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info		*tt_info;
	unsigned int			num_ports;
	int				i, j;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}


/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u "
					"not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);
	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
		 slot_id,
		 &xhci->dcbaa->dev_context_ptrs[slot_id],
		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_ring	*ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}

/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.  xhci->port_array provides an array of the port speed for
 * each offset into the port status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All we
 * know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Scan through the xHCI roothub port array, looking for the Nth
 * entry of the correct port speed.  Return the port number of that entry.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	unsigned int num_similar_speed_ports;
	unsigned int faked_port_num;
	int i;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	faked_port_num = top_dev->portnum;
	for (i = 0, num_similar_speed_ports = 0;
			i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER))
			num_similar_speed_ports++;
		if (num_similar_speed_ports == faked_port_num)
			/* Roothub ports are numbered from 1 to N */
			return i+1;
	}
	return 0;
}

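/*
 * Example (hypothetical port layout): if xhci->port_array is {3, 3, 2, 2} (two
 * USB 3.0 ports followed by two USB 2.0 ports) and a high speed device sits on
 * USB 2.0 roothub port 1, then faked_port_num == 1 and the first similar-speed
 * entry is at index 2, so the function returns 3 as the real HW port number.
 */
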
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_slot_ctx    *slot_ctx;
	u32			port_num;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or a
	 * descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
	/*
	 * XXX: Not sure about wireless USB devices.
	 */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
		break;
	default:
		/* New speed? */
		BUG();
	}
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 *
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes. We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}

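/*
 * Worked example: a full speed isoc endpoint with bInterval == 4 means
 * 2^(4 - 1) == 8 frames.  Here interval becomes clamp_val(4, 1, 16) - 1 == 3,
 * then + 3 for the frame-to-microframe conversion, giving 2^6 == 64
 * microframes == 8 ms, as expected.
 */
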
/*
 * Convert bInterval expressed in frames (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = fls(8 * ep->desc.bInterval) - 1;
	interval = clamp_val(interval, 3, 10);
	if ((1 << interval) != 8 * ep->desc.bInterval)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 8 * ep->desc.bInterval);

	return interval;
}

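/*
 * Worked example: a low speed interrupt endpoint with bInterval == 9 frames is
 * 72 microframes; fls(72) - 1 == 6, so the interval is rounded down to
 * 2^6 == 64 microframes (8 ms) and the warning above is printed.
 */
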
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = ep->desc.bInterval;
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {

			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}

/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}

/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}

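/*
 * Worked example: a high speed isoc endpoint with wMaxPacketSize == 0x1400
 * (1024-byte packets, 2 additional transactions per microframe) gives
 * max_packet == 1024 and max_burst == 2, so the max ESIT payload is
 * 1024 * 3 == 3072 bytes.
 */
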
Sarah Sharp8df75f42010-04-02 15:34:16 -07001287/* Set up an endpoint with one ring segment. Do not allocate stream rings.
1288 * Drivers will have to call usb_alloc_streams() to do that.
1289 */
Sarah Sharpf94e01862009-04-27 19:58:38 -07001290int xhci_endpoint_init(struct xhci_hcd *xhci,
1291 struct xhci_virt_device *virt_dev,
1292 struct usb_device *udev,
Sarah Sharpf88ba782009-05-14 11:44:22 -07001293 struct usb_host_endpoint *ep,
1294 gfp_t mem_flags)
Sarah Sharpf94e01862009-04-27 19:58:38 -07001295{
1296 unsigned int ep_index;
1297 struct xhci_ep_ctx *ep_ctx;
1298 struct xhci_ring *ep_ring;
1299 unsigned int max_packet;
1300 unsigned int max_burst;
Sarah Sharp9238f252010-04-16 08:07:27 -07001301 u32 max_esit_payload;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001302
1303 ep_index = xhci_get_endpoint_index(&ep->desc);
John Yound115b042009-07-27 12:05:15 -07001304 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001305
1306 /* Set up the endpoint ring */
Andiry Xua061a5a2010-07-22 15:23:47 -07001307 /*
1308 * Isochronous endpoint ring needs bigger size because one isoc URB
1309 * carries multiple packets and it will insert multiple tds to the
1310 * ring.
1311 * This should be replaced with dynamic ring resizing in the future.
1312 */
1313 if (usb_endpoint_xfer_isoc(&ep->desc))
1314 virt_dev->eps[ep_index].new_ring =
Andiry Xu7e393a82011-09-23 14:19:54 -07001315 xhci_ring_alloc(xhci, 8, true, true, mem_flags);
Andiry Xua061a5a2010-07-22 15:23:47 -07001316 else
1317 virt_dev->eps[ep_index].new_ring =
Andiry Xu7e393a82011-09-23 14:19:54 -07001318 xhci_ring_alloc(xhci, 1, true, false, mem_flags);
Sarah Sharp74f9fe22009-12-03 09:44:29 -08001319 if (!virt_dev->eps[ep_index].new_ring) {
1320 /* Attempt to use the ring cache */
1321 if (virt_dev->num_rings_cached == 0)
1322 return -ENOMEM;
1323 virt_dev->eps[ep_index].new_ring =
1324 virt_dev->ring_cache[virt_dev->num_rings_cached];
1325 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
1326 virt_dev->num_rings_cached--;
Andiry Xu7e393a82011-09-23 14:19:54 -07001327 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1328 usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
Sarah Sharp74f9fe22009-12-03 09:44:29 -08001329 }
Andiry Xud18240d2010-07-22 15:23:25 -07001330 virt_dev->eps[ep_index].skip = false;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07001331 ep_ring = virt_dev->eps[ep_index].new_ring;
Matt Evans28ccd292011-03-29 13:40:46 +11001332 ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001333
Matt Evans28ccd292011-03-29 13:40:46 +11001334 ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
1335 | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001336
1337 /* FIXME dig Mult and streams info out of ep companion desc */
1338
Sarah Sharp47692d12009-07-27 12:04:27 -07001339 /* Allow 3 retries for everything but isoc;
Andiry Xu7b1fc2e2011-05-05 18:14:00 +08001340 * CErr shall be set to 0 for Isoch endpoints.
Sarah Sharp47692d12009-07-27 12:04:27 -07001341 */
Sarah Sharpf94e01862009-04-27 19:58:38 -07001342 if (!usb_endpoint_xfer_isoc(&ep->desc))
Matt Evans28ccd292011-03-29 13:40:46 +11001343 ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001344 else
Andiry Xu7b1fc2e2011-05-05 18:14:00 +08001345 ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001346
Matt Evans28ccd292011-03-29 13:40:46 +11001347 ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001348
1349 /* Set the max packet size and max burst */
1350 switch (udev->speed) {
1351 case USB_SPEED_SUPER:
Kuninori Morimoto29cc8892011-08-23 03:12:03 -07001352 max_packet = usb_endpoint_maxp(&ep->desc);
Matt Evans28ccd292011-03-29 13:40:46 +11001353 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
Sarah Sharpb10de142009-04-27 19:58:50 -07001354 /* dig out max burst from ep companion desc */
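		/* bMaxBurst is zero-based (0 means bursts of one packet),
		 * matching the xHC's Max Burst Size field, so it is written
		 * out unchanged.
		 */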
Alan Stern842f1692010-04-30 12:44:46 -04001355 max_packet = ep->ss_ep_comp.bMaxBurst;
Matt Evans28ccd292011-03-29 13:40:46 +11001356 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001357 break;
1358 case USB_SPEED_HIGH:
1359 /* bits 11:12 specify the number of additional transaction
1360 * opportunities per microframe (USB 2.0, section 9.6.6)
1361 */
1362 if (usb_endpoint_xfer_isoc(&ep->desc) ||
1363 usb_endpoint_xfer_int(&ep->desc)) {
Kuninori Morimoto29cc8892011-08-23 03:12:03 -07001364 max_burst = (usb_endpoint_maxp(&ep->desc)
Matt Evans28ccd292011-03-29 13:40:46 +11001365 & 0x1800) >> 11;
1366 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001367 }
1368 /* Fall through */
1369 case USB_SPEED_FULL:
1370 case USB_SPEED_LOW:
Kuninori Morimoto29cc8892011-08-23 03:12:03 -07001371 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
Matt Evans28ccd292011-03-29 13:40:46 +11001372 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001373 break;
1374 default:
1375 BUG();
1376 }
Sarah Sharp9238f252010-04-16 08:07:27 -07001377 max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
Matt Evans28ccd292011-03-29 13:40:46 +11001378 ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
Sarah Sharp9238f252010-04-16 08:07:27 -07001379
1380 /*
1381 * XXX no idea how to calculate the average TRB buffer length for bulk
1382 * endpoints, as the driver gives us no clue how big each scatter gather
1383 * list entry (or buffer) is going to be.
1384 *
1385 * For isochronous and interrupt endpoints, we set it to the max
1386 * available, until we have new API in the USB core to allow drivers to
1387 * declare how much bandwidth they actually need.
1388 *
1389 * Normally, it would be calculated by taking the total of the buffer
1390 * lengths in the TD and then dividing by the number of TRBs in a TD,
1391 * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
1392 * use Event Data TRBs, and we don't chain in a link TRB on short
1393 * transfers, we're basically dividing by 1.
Andiry Xu51eb01a2011-05-05 18:13:58 +08001394 *
1395 * xHCI 1.0 specification indicates that the Average TRB Length should
1396 * be set to 8 for control endpoints.
Sarah Sharp9238f252010-04-16 08:07:27 -07001397 */
Andiry Xu51eb01a2011-05-05 18:13:58 +08001398 if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
1399 ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
1400 else
1401 ep_ctx->tx_info |=
1402 cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
Sarah Sharp9238f252010-04-16 08:07:27 -07001403
Sarah Sharpf94e01862009-04-27 19:58:38 -07001404 /* FIXME Debug endpoint context */
1405 return 0;
1406}
1407
1408void xhci_endpoint_zero(struct xhci_hcd *xhci,
1409 struct xhci_virt_device *virt_dev,
1410 struct usb_host_endpoint *ep)
1411{
1412 unsigned int ep_index;
1413 struct xhci_ep_ctx *ep_ctx;
1414
1415 ep_index = xhci_get_endpoint_index(&ep->desc);
John Yound115b042009-07-27 12:05:15 -07001416 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001417
1418 ep_ctx->ep_info = 0;
1419 ep_ctx->ep_info2 = 0;
Sarah Sharp8e595a52009-07-27 12:03:31 -07001420 ep_ctx->deq = 0;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001421 ep_ctx->tx_info = 0;
1422 /* Don't free the endpoint ring until the set interface or configuration
1423 * request succeeds.
1424 */
1425}
1426
Sarah Sharp9af5d712011-09-02 11:05:48 -07001427void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
1428{
1429 bw_info->ep_interval = 0;
1430 bw_info->mult = 0;
1431 bw_info->num_packets = 0;
1432 bw_info->max_packet_size = 0;
1433 bw_info->type = 0;
1434 bw_info->max_esit_payload = 0;
1435}
1436
1437void xhci_update_bw_info(struct xhci_hcd *xhci,
1438 struct xhci_container_ctx *in_ctx,
1439 struct xhci_input_control_ctx *ctrl_ctx,
1440 struct xhci_virt_device *virt_dev)
1441{
1442 struct xhci_bw_info *bw_info;
1443 struct xhci_ep_ctx *ep_ctx;
1444 unsigned int ep_type;
1445 int i;
1446
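	/*
	 * Walk every endpoint index except 0; index 0 is the default control
	 * endpoint, which never has periodic bandwidth info to track.
	 */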
1447 for (i = 1; i < 31; ++i) {
1448 bw_info = &virt_dev->eps[i].bw_info;
1449
1450 /* We can't tell what endpoint type is being dropped, but
1451 * unconditionally clearing the bandwidth info for non-periodic
1452 * endpoints should be harmless because the info will never be
1453 * set in the first place.
1454 */
1455 if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1456 /* Dropped endpoint */
1457 xhci_clear_endpoint_bw_info(bw_info);
1458 continue;
1459 }
1460
1461 if (EP_IS_ADDED(ctrl_ctx, i)) {
1462 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1463 ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1464
1465 /* Ignore non-periodic endpoints */
1466 if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1467 ep_type != ISOC_IN_EP &&
1468 ep_type != INT_IN_EP)
1469 continue;
1470
1471 /* Added or changed endpoint */
1472 bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1473 le32_to_cpu(ep_ctx->ep_info));
Sarah Sharp170c0262011-09-13 16:41:12 -07001474 /* Number of packets and mult are zero-based in the
1475 * input context, but we want one-based for the
1476 * interval table.
Sarah Sharp9af5d712011-09-02 11:05:48 -07001477 */
Sarah Sharp170c0262011-09-13 16:41:12 -07001478 bw_info->mult = CTX_TO_EP_MULT(
1479 le32_to_cpu(ep_ctx->ep_info)) + 1;
Sarah Sharp9af5d712011-09-02 11:05:48 -07001480 bw_info->num_packets = CTX_TO_MAX_BURST(
1481 le32_to_cpu(ep_ctx->ep_info2)) + 1;
1482 bw_info->max_packet_size = MAX_PACKET_DECODED(
1483 le32_to_cpu(ep_ctx->ep_info2));
1484 bw_info->type = ep_type;
1485 bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1486 le32_to_cpu(ep_ctx->tx_info));
1487 }
1488 }
1489}
1490
Sarah Sharpf2217e82009-08-07 14:04:43 -07001491/* Copy an output xhci_ep_ctx into the input xhci_ep_ctx.
1492 * Useful when you want to change one particular aspect of the endpoint and then
1493 * issue a configure endpoint command.
1494 */
1495void xhci_endpoint_copy(struct xhci_hcd *xhci,
Sarah Sharp913a8a32009-09-04 10:53:13 -07001496 struct xhci_container_ctx *in_ctx,
1497 struct xhci_container_ctx *out_ctx,
1498 unsigned int ep_index)
Sarah Sharpf2217e82009-08-07 14:04:43 -07001499{
1500 struct xhci_ep_ctx *out_ep_ctx;
1501 struct xhci_ep_ctx *in_ep_ctx;
1502
Sarah Sharp913a8a32009-09-04 10:53:13 -07001503 out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1504 in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001505
1506 in_ep_ctx->ep_info = out_ep_ctx->ep_info;
1507 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1508 in_ep_ctx->deq = out_ep_ctx->deq;
1509 in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1510}
1511
1512/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
1513 * Useful when you want to change one particular aspect of the endpoint and then
1514 * issue a configure endpoint command. Only the context entries field matters,
1515 * but we'll copy the whole thing anyway.
1516 */
Sarah Sharp913a8a32009-09-04 10:53:13 -07001517void xhci_slot_copy(struct xhci_hcd *xhci,
1518 struct xhci_container_ctx *in_ctx,
1519 struct xhci_container_ctx *out_ctx)
Sarah Sharpf2217e82009-08-07 14:04:43 -07001520{
1521 struct xhci_slot_ctx *in_slot_ctx;
1522 struct xhci_slot_ctx *out_slot_ctx;
1523
Sarah Sharp913a8a32009-09-04 10:53:13 -07001524 in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1525 out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001526
1527 in_slot_ctx->dev_info = out_slot_ctx->dev_info;
1528 in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
1529 in_slot_ctx->tt_info = out_slot_ctx->tt_info;
1530 in_slot_ctx->dev_state = out_slot_ctx->dev_state;
1531}
1532
John Youn254c80a2009-07-27 12:05:03 -07001533/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
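/* The number of buffers the xHC needs is advertised in HCSPARAMS2.  Each
 * buffer is one HC page; their DMA addresses are collected in sp_array, and
 * entry 0 of the DCBAA points at that array (xHCI spec section 4.20).
 */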
1534static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1535{
1536 int i;
1537 struct device *dev = xhci_to_hcd(xhci)->self.controller;
1538 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1539
1540 xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
1541
1542 if (!num_sp)
1543 return 0;
1544
1545 xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
1546 if (!xhci->scratchpad)
1547 goto fail_sp;
1548
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001549 xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
John Youn254c80a2009-07-27 12:05:03 -07001550 num_sp * sizeof(u64),
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001551 &xhci->scratchpad->sp_dma, flags);
John Youn254c80a2009-07-27 12:05:03 -07001552 if (!xhci->scratchpad->sp_array)
1553 goto fail_sp2;
1554
1555 xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
1556 if (!xhci->scratchpad->sp_buffers)
1557 goto fail_sp3;
1558
1559 xhci->scratchpad->sp_dma_buffers =
1560 kzalloc(sizeof(dma_addr_t) * num_sp, flags);
1561
1562 if (!xhci->scratchpad->sp_dma_buffers)
1563 goto fail_sp4;
1564
Matt Evans28ccd292011-03-29 13:40:46 +11001565 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
John Youn254c80a2009-07-27 12:05:03 -07001566 for (i = 0; i < num_sp; i++) {
1567 dma_addr_t dma;
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001568 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1569 flags);
John Youn254c80a2009-07-27 12:05:03 -07001570 if (!buf)
1571 goto fail_sp5;
1572
1573 xhci->scratchpad->sp_array[i] = dma;
1574 xhci->scratchpad->sp_buffers[i] = buf;
1575 xhci->scratchpad->sp_dma_buffers[i] = dma;
1576 }
1577
1578 return 0;
1579
1580 fail_sp5:
1581 for (i = i - 1; i >= 0; i--) {
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001582 dma_free_coherent(dev, xhci->page_size,
John Youn254c80a2009-07-27 12:05:03 -07001583 xhci->scratchpad->sp_buffers[i],
1584 xhci->scratchpad->sp_dma_buffers[i]);
1585 }
1586 kfree(xhci->scratchpad->sp_dma_buffers);
1587
1588 fail_sp4:
1589 kfree(xhci->scratchpad->sp_buffers);
1590
1591 fail_sp3:
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001592 dma_free_coherent(dev, num_sp * sizeof(u64),
John Youn254c80a2009-07-27 12:05:03 -07001593 xhci->scratchpad->sp_array,
1594 xhci->scratchpad->sp_dma);
1595
1596 fail_sp2:
1597 kfree(xhci->scratchpad);
1598 xhci->scratchpad = NULL;
1599
1600 fail_sp:
1601 return -ENOMEM;
1602}
1603
1604static void scratchpad_free(struct xhci_hcd *xhci)
1605{
1606 int num_sp;
1607 int i;
1608 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
1609
1610 if (!xhci->scratchpad)
1611 return;
1612
1613 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1614
1615 for (i = 0; i < num_sp; i++) {
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001616 dma_free_coherent(&pdev->dev, xhci->page_size,
John Youn254c80a2009-07-27 12:05:03 -07001617 xhci->scratchpad->sp_buffers[i],
1618 xhci->scratchpad->sp_dma_buffers[i]);
1619 }
1620 kfree(xhci->scratchpad->sp_dma_buffers);
1621 kfree(xhci->scratchpad->sp_buffers);
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001622 dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
John Youn254c80a2009-07-27 12:05:03 -07001623 xhci->scratchpad->sp_array,
1624 xhci->scratchpad->sp_dma);
1625 kfree(xhci->scratchpad);
1626 xhci->scratchpad = NULL;
1627}
1628
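/* Allocate an xhci_command.  allocate_in_ctx controls whether an input device
 * context is allocated along with it; allocate_completion controls whether a
 * struct completion is allocated so the caller can wait for the command.
 */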
Sarah Sharp913a8a32009-09-04 10:53:13 -07001629struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
Sarah Sharpa1d78c12009-12-09 15:59:03 -08001630 bool allocate_in_ctx, bool allocate_completion,
1631 gfp_t mem_flags)
Sarah Sharp913a8a32009-09-04 10:53:13 -07001632{
1633 struct xhci_command *command;
1634
1635 command = kzalloc(sizeof(*command), mem_flags);
1636 if (!command)
1637 return NULL;
1638
Sarah Sharpa1d78c12009-12-09 15:59:03 -08001639 if (allocate_in_ctx) {
1640 command->in_ctx =
1641 xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
1642 mem_flags);
1643 if (!command->in_ctx) {
1644 kfree(command);
1645 return NULL;
1646 }
Julia Lawall06e18292009-11-21 12:51:47 +01001647 }
Sarah Sharp913a8a32009-09-04 10:53:13 -07001648
1649 if (allocate_completion) {
1650 command->completion =
1651 kzalloc(sizeof(struct completion), mem_flags);
1652 if (!command->completion) {
1653 xhci_free_container_ctx(xhci, command->in_ctx);
Julia Lawall06e18292009-11-21 12:51:47 +01001654 kfree(command);
Sarah Sharp913a8a32009-09-04 10:53:13 -07001655 return NULL;
1656 }
1657 init_completion(command->completion);
1658 }
1659
1660 command->status = 0;
1661 INIT_LIST_HEAD(&command->cmd_list);
1662 return command;
1663}
1664
Andiry Xu8e51adc2010-07-22 15:23:31 -07001665void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
1666{
Andiry Xu2ffdea22011-09-02 11:05:57 -07001667 if (urb_priv) {
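		/* All TDs for this URB were allocated as a single block, with
		 * td[0] pointing at its start, so one kfree releases them all.
		 */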
1668 kfree(urb_priv->td[0]);
1669 kfree(urb_priv);
Andiry Xu8e51adc2010-07-22 15:23:31 -07001670 }
Andiry Xu8e51adc2010-07-22 15:23:31 -07001671}
1672
Sarah Sharp913a8a32009-09-04 10:53:13 -07001673void xhci_free_command(struct xhci_hcd *xhci,
1674 struct xhci_command *command)
1675{
1676 xhci_free_container_ctx(xhci,
1677 command->in_ctx);
1678 kfree(command->completion);
1679 kfree(command);
1680}
1681
Sarah Sharp66d4ead2009-04-27 19:52:28 -07001682void xhci_mem_cleanup(struct xhci_hcd *xhci)
1683{
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001684 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
Andiry Xu95743232011-09-23 14:19:51 -07001685 struct dev_info *dev_info, *next;
1686 unsigned long flags;
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001687 int size;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001688 int i;
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001689
1690 /* Free the Event Ring Segment Table and the actual Event Ring */
Sarah Sharpd94c05e2009-11-03 22:02:22 -08001691 if (xhci->ir_set) {
1692 xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
1693 xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
1694 xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
1695 }
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001696 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
1697 if (xhci->erst.entries)
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001698 dma_free_coherent(&pdev->dev, size,
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001699 xhci->erst.entries, xhci->erst.erst_dma_addr);
1700 xhci->erst.entries = NULL;
1701 xhci_dbg(xhci, "Freed ERST\n");
1702 if (xhci->event_ring)
1703 xhci_ring_free(xhci, xhci->event_ring);
1704 xhci->event_ring = NULL;
1705 xhci_dbg(xhci, "Freed event ring\n");
1706
Sarah Sharp8e595a52009-07-27 12:03:31 -07001707 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001708 if (xhci->cmd_ring)
1709 xhci_ring_free(xhci, xhci->cmd_ring);
1710 xhci->cmd_ring = NULL;
1711 xhci_dbg(xhci, "Freed command ring\n");
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001712
1713 for (i = 1; i < MAX_HC_SLOTS; ++i)
1714 xhci_free_virt_device(xhci, i);
1715
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001716 if (xhci->segment_pool)
1717 dma_pool_destroy(xhci->segment_pool);
1718 xhci->segment_pool = NULL;
1719 xhci_dbg(xhci, "Freed segment pool\n");
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001720
1721 if (xhci->device_pool)
1722 dma_pool_destroy(xhci->device_pool);
1723 xhci->device_pool = NULL;
1724 xhci_dbg(xhci, "Freed device context pool\n");
1725
Sarah Sharp8df75f42010-04-02 15:34:16 -07001726 if (xhci->small_streams_pool)
1727 dma_pool_destroy(xhci->small_streams_pool);
1728 xhci->small_streams_pool = NULL;
1729 xhci_dbg(xhci, "Freed small stream array pool\n");
1730
1731 if (xhci->medium_streams_pool)
1732 dma_pool_destroy(xhci->medium_streams_pool);
1733 xhci->medium_streams_pool = NULL;
1734 xhci_dbg(xhci, "Freed medium stream array pool\n");
1735
Sarah Sharp8e595a52009-07-27 12:03:31 -07001736 xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
Sarah Sharpa74588f2009-04-27 19:53:42 -07001737 if (xhci->dcbaa)
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001738 dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
Sarah Sharpa74588f2009-04-27 19:53:42 -07001739 xhci->dcbaa, xhci->dcbaa->dma);
1740 xhci->dcbaa = NULL;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001741
Sarah Sharp5294bea2009-11-04 11:22:19 -08001742 scratchpad_free(xhci);
Sarah Sharpda6699c2010-10-26 16:47:13 -07001743
Andiry Xu95743232011-09-23 14:19:51 -07001744 spin_lock_irqsave(&xhci->lock, flags);
1745 list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
1746 list_del(&dev_info->list);
1747 kfree(dev_info);
1748 }
1749 spin_unlock_irqrestore(&xhci->lock, flags);
1750
Sarah Sharpda6699c2010-10-26 16:47:13 -07001751 xhci->num_usb2_ports = 0;
1752 xhci->num_usb3_ports = 0;
1753 kfree(xhci->usb2_ports);
1754 kfree(xhci->usb3_ports);
1755 kfree(xhci->port_array);
Sarah Sharp839c8172011-09-02 11:05:47 -07001756 kfree(xhci->rh_bw);
Sarah Sharpda6699c2010-10-26 16:47:13 -07001757
Sarah Sharp66d4ead2009-04-27 19:52:28 -07001758 xhci->page_size = 0;
1759 xhci->page_shift = 0;
Sarah Sharp20b67cf2010-12-15 12:47:14 -08001760 xhci->bus_state[0].bus_suspended = 0;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001761 xhci->bus_state[1].bus_suspended = 0;
Sarah Sharp66d4ead2009-04-27 19:52:28 -07001762}
1763
Sarah Sharp6648f292009-11-09 13:35:23 -08001764static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
1765 struct xhci_segment *input_seg,
1766 union xhci_trb *start_trb,
1767 union xhci_trb *end_trb,
1768 dma_addr_t input_dma,
1769 struct xhci_segment *result_seg,
1770 char *test_name, int test_number)
1771{
1772 unsigned long long start_dma;
1773 unsigned long long end_dma;
1774 struct xhci_segment *seg;
1775
1776 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
1777 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
1778
1779 seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
1780 if (seg != result_seg) {
1781 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
1782 test_name, test_number);
1783 xhci_warn(xhci, "Tested TRB math w/ seg %p and "
1784 "input DMA 0x%llx\n",
1785 input_seg,
1786 (unsigned long long) input_dma);
1787 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
1788 "ending TRB %p (0x%llx DMA)\n",
1789 start_trb, start_dma,
1790 end_trb, end_dma);
1791 xhci_warn(xhci, "Expected seg %p, got seg %p\n",
1792 result_seg, seg);
1793 return -1;
1794 }
1795 return 0;
1796}
1797
1798/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
1799static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
1800{
1801 struct {
1802 dma_addr_t input_dma;
1803 struct xhci_segment *result_seg;
1804 } simple_test_vector [] = {
1805 /* A zeroed DMA field should fail */
1806 { 0, NULL },
1807 /* One TRB before the ring start should fail */
1808 { xhci->event_ring->first_seg->dma - 16, NULL },
1809 /* One byte before the ring start should fail */
1810 { xhci->event_ring->first_seg->dma - 1, NULL },
1811 /* Starting TRB should succeed */
1812 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1813 /* Ending TRB should succeed */
1814 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1815 xhci->event_ring->first_seg },
1816 /* One byte after the ring end should fail */
1817 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1818 /* One TRB after the ring end should fail */
1819 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1820 /* An address of all ones should fail */
1821 { (dma_addr_t) (~0), NULL },
1822 };
1823 struct {
1824 struct xhci_segment *input_seg;
1825 union xhci_trb *start_trb;
1826 union xhci_trb *end_trb;
1827 dma_addr_t input_dma;
1828 struct xhci_segment *result_seg;
1829 } complex_test_vector [] = {
1830 /* Test feeding a valid DMA address from a different ring */
1831 { .input_seg = xhci->event_ring->first_seg,
1832 .start_trb = xhci->event_ring->first_seg->trbs,
1833 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1834 .input_dma = xhci->cmd_ring->first_seg->dma,
1835 .result_seg = NULL,
1836 },
1837 /* Test feeding a valid end TRB from a different ring */
1838 { .input_seg = xhci->event_ring->first_seg,
1839 .start_trb = xhci->event_ring->first_seg->trbs,
1840 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1841 .input_dma = xhci->cmd_ring->first_seg->dma,
1842 .result_seg = NULL,
1843 },
1844 /* Test feeding a valid start and end TRB from a different ring */
1845 { .input_seg = xhci->event_ring->first_seg,
1846 .start_trb = xhci->cmd_ring->first_seg->trbs,
1847 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1848 .input_dma = xhci->cmd_ring->first_seg->dma,
1849 .result_seg = NULL,
1850 },
1851 /* TRB in this ring, but after this TD */
1852 { .input_seg = xhci->event_ring->first_seg,
1853 .start_trb = &xhci->event_ring->first_seg->trbs[0],
1854 .end_trb = &xhci->event_ring->first_seg->trbs[3],
1855 .input_dma = xhci->event_ring->first_seg->dma + 4*16,
1856 .result_seg = NULL,
1857 },
1858 /* TRB in this ring, but before this TD */
1859 { .input_seg = xhci->event_ring->first_seg,
1860 .start_trb = &xhci->event_ring->first_seg->trbs[3],
1861 .end_trb = &xhci->event_ring->first_seg->trbs[6],
1862 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1863 .result_seg = NULL,
1864 },
1865 /* TRB in this ring, but after this wrapped TD */
1866 { .input_seg = xhci->event_ring->first_seg,
1867 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1868 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1869 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1870 .result_seg = NULL,
1871 },
1872 /* TRB in this ring, but before this wrapped TD */
1873 { .input_seg = xhci->event_ring->first_seg,
1874 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1875 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1876 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
1877 .result_seg = NULL,
1878 },
1879 /* TRB not in this ring, and we have a wrapped TD */
1880 { .input_seg = xhci->event_ring->first_seg,
1881 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1882 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1883 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
1884 .result_seg = NULL,
1885 },
1886 };
1887
1888 unsigned int num_tests;
1889 int i, ret;
1890
Kulikov Vasiliye10fa472010-06-28 15:55:46 +04001891 num_tests = ARRAY_SIZE(simple_test_vector);
Sarah Sharp6648f292009-11-09 13:35:23 -08001892 for (i = 0; i < num_tests; i++) {
1893 ret = xhci_test_trb_in_td(xhci,
1894 xhci->event_ring->first_seg,
1895 xhci->event_ring->first_seg->trbs,
1896 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1897 simple_test_vector[i].input_dma,
1898 simple_test_vector[i].result_seg,
1899 "Simple", i);
1900 if (ret < 0)
1901 return ret;
1902 }
1903
Kulikov Vasiliye10fa472010-06-28 15:55:46 +04001904 num_tests = ARRAY_SIZE(complex_test_vector);
Sarah Sharp6648f292009-11-09 13:35:23 -08001905 for (i = 0; i < num_tests; i++) {
1906 ret = xhci_test_trb_in_td(xhci,
1907 complex_test_vector[i].input_seg,
1908 complex_test_vector[i].start_trb,
1909 complex_test_vector[i].end_trb,
1910 complex_test_vector[i].input_dma,
1911 complex_test_vector[i].result_seg,
1912 "Complex", i);
1913 if (ret < 0)
1914 return ret;
1915 }
1916 xhci_dbg(xhci, "TRB math tests passed.\n");
1917 return 0;
1918}
1919
Sarah Sharp257d5852010-07-29 22:12:56 -07001920static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
1921{
1922 u64 temp;
1923 dma_addr_t deq;
1924
1925 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
1926 xhci->event_ring->dequeue);
1927 if (deq == 0 && !in_interrupt())
1928 xhci_warn(xhci, "WARN something wrong with SW event ring "
1929 "dequeue ptr.\n");
1930 /* Update HC event ring dequeue pointer */
1931 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
1932 temp &= ERST_PTR_MASK;
1933 /* Don't clear the EHB bit (which is RW1C) because
1934 * there might be more events to service.
1935 */
1936 temp &= ~ERST_EHB;
1937 xhci_dbg(xhci, "// Write event ring dequeue pointer, "
1938 "preserving EHB bit\n");
1939 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
1940 &xhci->ir_set->erst_dequeue);
1941}
1942
Sarah Sharpda6699c2010-10-26 16:47:13 -07001943static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
Matt Evans28ccd292011-03-29 13:40:46 +11001944 __le32 __iomem *addr, u8 major_revision)
Sarah Sharpda6699c2010-10-26 16:47:13 -07001945{
1946 u32 temp, port_offset, port_count;
1947 int i;
1948
1949 if (major_revision > 0x03) {
1950 xhci_warn(xhci, "Ignoring unknown port speed, "
1951 "Ext Cap %p, revision = 0x%x\n",
1952 addr, major_revision);
1953 /* Ignoring port protocol we can't understand. FIXME */
1954 return;
1955 }
1956
1957 /* Port offset and count in the third dword, see section 7.2 */
1958 temp = xhci_readl(xhci, addr + 2);
1959 port_offset = XHCI_EXT_PORT_OFF(temp);
1960 port_count = XHCI_EXT_PORT_COUNT(temp);
1961 xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
1962 "count = %u, revision = 0x%x\n",
1963 addr, port_offset, port_count, major_revision);
1964 /* Port count includes the current port offset */
1965 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
1966 /* WTF? "Valid values are ‘1’ to MaxPorts" */
1967 return;
Andiry Xufc71ff72011-09-23 14:19:51 -07001968
1969 /* Check the host's USB2 LPM capability */
1970 if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
1971 (temp & XHCI_L1C)) {
1972 xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
1973 xhci->sw_lpm_support = 1;
1974 }
1975
1976 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
1977 xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
1978 xhci->sw_lpm_support = 1;
1979 if (temp & XHCI_HLC) {
1980 xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
1981 xhci->hw_lpm_support = 1;
1982 }
1983 }
1984
Sarah Sharpda6699c2010-10-26 16:47:13 -07001985 port_offset--;
1986 for (i = port_offset; i < (port_offset + port_count); i++) {
1987 /* Duplicate entry. Ignore the port if the revisions differ. */
1988 if (xhci->port_array[i] != 0) {
1989 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
1990 " port %u\n", addr, i);
1991 xhci_warn(xhci, "Port was marked as USB %u, "
1992 "duplicated as USB %u\n",
1993 xhci->port_array[i], major_revision);
1994 /* Only adjust the roothub port counts if we haven't
1995 * found a similar duplicate.
1996 */
1997 if (xhci->port_array[i] != major_revision &&
Dan Carpenter22e04872011-03-17 22:39:49 +03001998 xhci->port_array[i] != DUPLICATE_ENTRY) {
Sarah Sharpda6699c2010-10-26 16:47:13 -07001999 if (xhci->port_array[i] == 0x03)
2000 xhci->num_usb3_ports--;
2001 else
2002 xhci->num_usb2_ports--;
Dan Carpenter22e04872011-03-17 22:39:49 +03002003 xhci->port_array[i] = DUPLICATE_ENTRY;
Sarah Sharpda6699c2010-10-26 16:47:13 -07002004 }
2005 /* FIXME: Should we disable the port? */
Sarah Sharpf8bbeab2010-12-09 10:29:00 -08002006 continue;
Sarah Sharpda6699c2010-10-26 16:47:13 -07002007 }
2008 xhci->port_array[i] = major_revision;
2009 if (major_revision == 0x03)
2010 xhci->num_usb3_ports++;
2011 else
2012 xhci->num_usb2_ports++;
2013 }
2014 /* FIXME: Should we disable ports not in the Extended Capabilities? */
2015}
2016
2017/*
2018 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
2019 * specify which speeds each port supports. We can't count on the port
2020 * speed bits in the PORTSC register being correct until a device is connected,
2021 * but we need to set up the two fake roothubs with the correct number of USB
2022 * 3.0 and USB 2.0 ports at host controller initialization time.
2023 */
2024static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2025{
Matt Evans28ccd292011-03-29 13:40:46 +11002026 __le32 __iomem *addr;
Sarah Sharpda6699c2010-10-26 16:47:13 -07002027 u32 offset;
2028 unsigned int num_ports;
Sarah Sharp2e279802011-09-02 11:05:50 -07002029 int i, j, port_index;
Sarah Sharpda6699c2010-10-26 16:47:13 -07002030
2031 addr = &xhci->cap_regs->hcc_params;
2032 offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
2033 if (offset == 0) {
2034 xhci_err(xhci, "No Extended Capability registers, "
2035 "unable to set up roothub.\n");
2036 return -ENODEV;
2037 }
2038
2039 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2040 xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
2041 if (!xhci->port_array)
2042 return -ENOMEM;
2043
Sarah Sharp839c8172011-09-02 11:05:47 -07002044 xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
2045 if (!xhci->rh_bw)
2046 return -ENOMEM;
Sarah Sharp2e279802011-09-02 11:05:50 -07002047 for (i = 0; i < num_ports; i++) {
2048 struct xhci_interval_bw_table *bw_table;
2049
Sarah Sharp839c8172011-09-02 11:05:47 -07002050 INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
Sarah Sharp2e279802011-09-02 11:05:50 -07002051 bw_table = &xhci->rh_bw[i].bw_table;
2052 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2053 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2054 }
Sarah Sharp839c8172011-09-02 11:05:47 -07002055
Sarah Sharpda6699c2010-10-26 16:47:13 -07002056 /*
2057 * For whatever reason, the first capability offset is from the
2058 * capability register base, not from the HCCPARAMS register.
2059 * See section 5.3.6 for offset calculation.
2060 */
2061 addr = &xhci->cap_regs->hc_capbase + offset;
2062 while (1) {
2063 u32 cap_id;
2064
2065 cap_id = xhci_readl(xhci, addr);
2066 if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
2067 xhci_add_in_port(xhci, num_ports, addr,
2068 (u8) XHCI_EXT_PORT_MAJOR(cap_id));
2069 offset = XHCI_EXT_CAPS_NEXT(cap_id);
2070 if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
2071 == num_ports)
2072 break;
2073 /*
2074 * Once you're into the Extended Capabilities, the offset is
2075 * always relative to the register holding the offset.
2076 */
2077 addr += offset;
2078 }
2079
2080 if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
2081 xhci_warn(xhci, "No ports on the roothubs?\n");
2082 return -ENODEV;
2083 }
2084 xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
2085 xhci->num_usb2_ports, xhci->num_usb3_ports);
Sarah Sharpd30b2a22010-11-23 10:42:22 -08002086
2087 /* Place limits on the number of roothub ports so that the hub
2088 * descriptors aren't longer than the USB core will allocate.
2089 */
2090 if (xhci->num_usb3_ports > 15) {
2091 xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
2092 xhci->num_usb3_ports = 15;
2093 }
2094 if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
2095 xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
2096 USB_MAXCHILDREN);
2097 xhci->num_usb2_ports = USB_MAXCHILDREN;
2098 }
2099
Sarah Sharpda6699c2010-10-26 16:47:13 -07002100 /*
2101 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
2102 * Not sure how the USB core will handle a hub with no ports...
2103 */
2104 if (xhci->num_usb2_ports) {
2105 xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
2106 xhci->num_usb2_ports, flags);
2107 if (!xhci->usb2_ports)
2108 return -ENOMEM;
2109
2110 port_index = 0;
Sarah Sharpf8bbeab2010-12-09 10:29:00 -08002111 for (i = 0; i < num_ports; i++) {
2112 if (xhci->port_array[i] == 0x03 ||
2113 xhci->port_array[i] == 0 ||
Dan Carpenter22e04872011-03-17 22:39:49 +03002114 xhci->port_array[i] == DUPLICATE_ENTRY)
Sarah Sharpf8bbeab2010-12-09 10:29:00 -08002115 continue;
2116
2117 xhci->usb2_ports[port_index] =
2118 &xhci->op_regs->port_status_base +
2119 NUM_PORT_REGS*i;
2120 xhci_dbg(xhci, "USB 2.0 port at index %u, "
2121 "addr = %p\n", i,
2122 xhci->usb2_ports[port_index]);
2123 port_index++;
Sarah Sharpd30b2a22010-11-23 10:42:22 -08002124 if (port_index == xhci->num_usb2_ports)
2125 break;
Sarah Sharpf8bbeab2010-12-09 10:29:00 -08002126 }
Sarah Sharpda6699c2010-10-26 16:47:13 -07002127 }
2128 if (xhci->num_usb3_ports) {
2129 xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
2130 xhci->num_usb3_ports, flags);
2131 if (!xhci->usb3_ports)
2132 return -ENOMEM;
2133
2134 port_index = 0;
2135 for (i = 0; i < num_ports; i++)
2136 if (xhci->port_array[i] == 0x03) {
2137 xhci->usb3_ports[port_index] =
2138 &xhci->op_regs->port_status_base +
2139 NUM_PORT_REGS*i;
2140 xhci_dbg(xhci, "USB 3.0 port at index %u, "
2141 "addr = %p\n", i,
2142 xhci->usb3_ports[port_index]);
2143 port_index++;
Sarah Sharpd30b2a22010-11-23 10:42:22 -08002144 if (port_index == xhci->num_usb3_ports)
2145 break;
Sarah Sharpda6699c2010-10-26 16:47:13 -07002146 }
2147 }
2148 return 0;
2149}
Sarah Sharp6648f292009-11-09 13:35:23 -08002150
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002151int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2152{
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002153 dma_addr_t dma;
2154 struct device *dev = xhci_to_hcd(xhci)->self.controller;
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002155 unsigned int val, val2;
Sarah Sharp8e595a52009-07-27 12:03:31 -07002156 u64 val_64;
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002157 struct xhci_segment *seg;
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002158 u32 page_size;
2159 int i;
2160
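	/*
	 * Bit n of the PAGESIZE register being set means the xHC supports a
	 * page size of 2^(n+12) bytes; the loop below picks out the smallest
	 * supported size.
	 */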
2161 page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
2162 xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
2163 for (i = 0; i < 16; i++) {
2164 if ((0x1 & page_size) != 0)
2165 break;
2166 page_size = page_size >> 1;
2167 }
2168 if (i < 16)
2169 xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
2170 else
2171 xhci_warn(xhci, "WARN: no supported page size\n");
2172 /* Use 4K pages, since that's common and the minimum the HC supports */
2173 xhci->page_shift = 12;
2174 xhci->page_size = 1 << xhci->page_shift;
2175 xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
2176
2177 /*
2178 * Program the Number of Device Slots Enabled field in the CONFIG
2179 * register with the max value of slots the HC can handle.
2180 */
2181 val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
2182 xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
2183 (unsigned int) val);
2184 val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
2185 val |= (val2 & ~HCS_SLOTS_MASK);
2186 xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
2187 (unsigned int) val);
2188 xhci_writel(xhci, val, &xhci->op_regs->config_reg);
2189
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002190 /*
Sarah Sharpa74588f2009-04-27 19:53:42 -07002191 * Section 5.4.8 - doorbell array must be
2192 * "physically contiguous and 64-byte (cache line) aligned".
2193 */
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07002194 xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
2195 GFP_KERNEL);
Sarah Sharpa74588f2009-04-27 19:53:42 -07002196 if (!xhci->dcbaa)
2197 goto fail;
2198 memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
2199 xhci->dcbaa->dma = dma;
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002200 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
2201 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
Sarah Sharp8e595a52009-07-27 12:03:31 -07002202 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
Sarah Sharpa74588f2009-04-27 19:53:42 -07002203
2204 /*
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002205 * Initialize the ring segment pool. The ring must be a contiguous
2206 * structure composed of TRBs.  TRBs must be 16-byte aligned, but the
2207 * command ring needs 64-byte aligned segments, so we pick the
2208 * greater alignment requirement.
2209 */
2210 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
2211 SEGMENT_SIZE, 64, xhci->page_size);
John Yound115b042009-07-27 12:05:15 -07002212
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002213 /* See Table 46 and Note on Figure 55 */
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002214 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
John Yound115b042009-07-27 12:05:15 -07002215 2112, 64, xhci->page_size);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002216 if (!xhci->segment_pool || !xhci->device_pool)
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002217 goto fail;
2218
Sarah Sharp8df75f42010-04-02 15:34:16 -07002219 /* Linear stream context arrays don't have any boundary restrictions,
2220 * and only need to be 16-byte aligned.
2221 */
2222 xhci->small_streams_pool =
2223 dma_pool_create("xHCI 256 byte stream ctx arrays",
2224 dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
2225 xhci->medium_streams_pool =
2226 dma_pool_create("xHCI 1KB stream ctx arrays",
2227 dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
2228 /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07002229 * will be allocated with dma_alloc_coherent()
Sarah Sharp8df75f42010-04-02 15:34:16 -07002230 */
2231
2232 if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
2233 goto fail;
2234
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002235 /* Set up the command ring to have one segment for now. */
Andiry Xu7e393a82011-09-23 14:19:54 -07002236 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002237 if (!xhci->cmd_ring)
2238 goto fail;
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002239 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
2240 xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
2241 (unsigned long long)xhci->cmd_ring->first_seg->dma);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002242
2243 /* Set the address in the Command Ring Control register */
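	/* The low bits of CRCR hold the Ring Cycle State and command ring
	 * control flags; keep them, merge in the first segment's DMA address
	 * above them, and OR the ring's cycle state into bit 0 (RCS).
	 */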
Sarah Sharp8e595a52009-07-27 12:03:31 -07002244 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2245 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2246 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002247 xhci->cmd_ring->cycle_state;
Sarah Sharp8e595a52009-07-27 12:03:31 -07002248 xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", (unsigned long long) val_64);
2249 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002250 xhci_dbg_cmd_ptrs(xhci);
2251
2252 val = xhci_readl(xhci, &xhci->cap_regs->db_off);
2253 val &= DBOFF_MASK;
2254 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
2255 " from cap regs base addr\n", val);
Dmitry Torokhovc50a00f2011-02-08 16:29:34 -08002256 xhci->dba = (void __iomem *) xhci->cap_regs + val;
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002257 xhci_dbg_regs(xhci);
2258 xhci_print_run_regs(xhci);
2259 /* Set ir_set to interrupt register set 0 */
Dmitry Torokhovc50a00f2011-02-08 16:29:34 -08002260 xhci->ir_set = &xhci->run_regs->ir_set[0];
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002261
2262 /*
2263 * Event ring setup: Allocate a normal ring, but also setup
2264 * the event ring segment table (ERST). Section 4.9.3.
2265 */
2266 xhci_dbg(xhci, "// Allocating event ring\n");
Andiry Xu7e393a82011-09-23 14:19:54 -07002267 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
2268 flags);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002269 if (!xhci->event_ring)
2270 goto fail;
Sarah Sharp6648f292009-11-09 13:35:23 -08002271 if (xhci_check_trb_in_td_math(xhci, flags) < 0)
2272 goto fail;
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002273
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07002274 xhci->erst.entries = dma_alloc_coherent(dev,
2275 sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
2276 GFP_KERNEL);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002277 if (!xhci->erst.entries)
2278 goto fail;
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002279 xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
2280 (unsigned long long)dma);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002281
2282 memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
2283 xhci->erst.num_entries = ERST_NUM_SEGS;
2284 xhci->erst.erst_dma_addr = dma;
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002285 xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002286 xhci->erst.num_entries,
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002287 xhci->erst.entries,
2288 (unsigned long long)xhci->erst.erst_dma_addr);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002289
2290 /* set ring base address and size for each segment table entry */
2291 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
2292 struct xhci_erst_entry *entry = &xhci->erst.entries[val];
Matt Evans28ccd292011-03-29 13:40:46 +11002293 entry->seg_addr = cpu_to_le64(seg->dma);
2294 entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002295 entry->rsvd = 0;
2296 seg = seg->next;
2297 }
2298
2299 /* set ERST count with the number of entries in the segment table */
2300 val = xhci_readl(xhci, &xhci->ir_set->erst_size);
2301 val &= ERST_SIZE_MASK;
2302 val |= ERST_NUM_SEGS;
2303 xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
2304 val);
2305 xhci_writel(xhci, val, &xhci->ir_set->erst_size);
2306
2307 xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
2308 /* set the segment table base address */
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002309 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
2310 (unsigned long long)xhci->erst.erst_dma_addr);
Sarah Sharp8e595a52009-07-27 12:03:31 -07002311 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2312 val_64 &= ERST_PTR_MASK;
2313 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
2314 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002315
2316 /* Set the event ring dequeue address */
Sarah Sharp23e3be12009-04-29 19:05:20 -07002317 xhci_set_hc_event_deq(xhci);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002318 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
Dmitry Torokhov09ece302011-02-08 16:29:33 -08002319 xhci_print_ir_set(xhci, 0);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002320
2321 /*
2322 * XXX: Might need to set the Interrupter Moderation Register to
2323 * something other than the default (~1ms minimum between interrupts).
2324 * See section 5.5.1.2.
2325 */
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002326 init_completion(&xhci->addr_dev);
2327 for (i = 0; i < MAX_HC_SLOTS; ++i)
Randy Dunlap326b4812010-04-19 08:53:50 -07002328 xhci->devs[i] = NULL;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08002329 for (i = 0; i < USB_MAXCHILDREN; ++i) {
Sarah Sharp20b67cf2010-12-15 12:47:14 -08002330 xhci->bus_state[0].resume_done[i] = 0;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08002331 xhci->bus_state[1].resume_done[i] = 0;
2332 }
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002333
John Youn254c80a2009-07-27 12:05:03 -07002334 if (scratchpad_alloc(xhci, flags))
2335 goto fail;
Sarah Sharpda6699c2010-10-26 16:47:13 -07002336 if (xhci_setup_port_arrays(xhci, flags))
2337 goto fail;
John Youn254c80a2009-07-27 12:05:03 -07002338
Andiry Xu95743232011-09-23 14:19:51 -07002339 INIT_LIST_HEAD(&xhci->lpm_failed_devs);
2340
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002341 return 0;
John Youn254c80a2009-07-27 12:05:03 -07002342
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002343fail:
2344 xhci_warn(xhci, "Couldn't initialize memory\n");
2345 xhci_mem_cleanup(xhci);
2346 return -ENOMEM;
2347}