Sarah Sharp66d4ead2009-04-27 19:52:28 -07001/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/usb.h>
Sarah Sharp0ebbab32009-04-27 19:52:34 -070024#include <linux/pci.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090025#include <linux/slab.h>
Sarah Sharp527c6d72009-04-29 19:06:56 -070026#include <linux/dmapool.h>
Sarah Sharp66d4ead2009-04-27 19:52:28 -070027
28#include "xhci.h"
29
Sarah Sharp0ebbab32009-04-27 19:52:34 -070030/*
31 * Allocates a generic ring segment from the ring pool, sets the dma address,
32 * initializes the segment to zero, and sets the private next pointer to NULL.
33 *
34 * Section 4.11.1.1:
35 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
36 */
Andiry Xu186a7ef2012-03-05 17:49:36 +080037static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
38 unsigned int cycle_state, gfp_t flags)
Sarah Sharp0ebbab32009-04-27 19:52:34 -070039{
40 struct xhci_segment *seg;
41 dma_addr_t dma;
Andiry Xu186a7ef2012-03-05 17:49:36 +080042 int i;
Sarah Sharp0ebbab32009-04-27 19:52:34 -070043
44 seg = kzalloc(sizeof *seg, flags);
45 if (!seg)
Randy Dunlap326b4812010-04-19 08:53:50 -070046 return NULL;
Sarah Sharp0ebbab32009-04-27 19:52:34 -070047
48 seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
49 if (!seg->trbs) {
50 kfree(seg);
Randy Dunlap326b4812010-04-19 08:53:50 -070051 return NULL;
Sarah Sharp0ebbab32009-04-27 19:52:34 -070052 }
Sarah Sharp0ebbab32009-04-27 19:52:34 -070053
David Howellseb8ccd22013-03-28 18:48:35 +000054 memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
Andiry Xu186a7ef2012-03-05 17:49:36 +080055 /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
56 if (cycle_state == 0) {
57 for (i = 0; i < TRBS_PER_SEGMENT; i++)
58 seg->trbs[i].link.control |= TRB_CYCLE;
59 }
Sarah Sharp0ebbab32009-04-27 19:52:34 -070060 seg->dma = dma;
61 seg->next = NULL;
62
63 return seg;
64}
65
66static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
67{
Sarah Sharp0ebbab32009-04-27 19:52:34 -070068 if (seg->trbs) {
Sarah Sharp0ebbab32009-04-27 19:52:34 -070069 dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
70 seg->trbs = NULL;
71 }
Sarah Sharp0ebbab32009-04-27 19:52:34 -070072 kfree(seg);
73}
74
Andiry Xu70d43602012-03-05 17:49:35 +080075static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
76 struct xhci_segment *first)
77{
78 struct xhci_segment *seg;
79
80 seg = first->next;
81 while (seg != first) {
82 struct xhci_segment *next = seg->next;
83 xhci_segment_free(xhci, seg);
84 seg = next;
85 }
86 xhci_segment_free(xhci, first);
87}
88
Sarah Sharp0ebbab32009-04-27 19:52:34 -070089/*
90 * Make the prev segment point to the next segment.
91 *
92 * Change the last TRB in the prev segment to be a Link TRB which points to the
93 * DMA address of the next segment. The caller needs to set any Link TRB
94 * related flags, such as End TRB, Toggle Cycle, and no snoop.
95 */
96static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
Andiry Xu3b72fca2012-03-05 17:49:32 +080097 struct xhci_segment *next, enum xhci_ring_type type)
Sarah Sharp0ebbab32009-04-27 19:52:34 -070098{
99 u32 val;
100
101 if (!prev || !next)
102 return;
103 prev->next = next;
Andiry Xu3b72fca2012-03-05 17:49:32 +0800104 if (type != TYPE_EVENT) {
Matt Evansf5960b62011-06-01 10:22:55 +1000105 prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
106 cpu_to_le64(next->dma);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700107
108 /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
Matt Evans28ccd292011-03-29 13:40:46 +1100109 val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700110 val &= ~TRB_TYPE_BITMASK;
111 val |= TRB_TYPE(TRB_LINK);
Sarah Sharpb0567b32009-08-07 14:04:36 -0700112 /* Always set the chain bit with 0.95 hardware */
Andiry Xu7e393a82011-09-23 14:19:54 -0700113 /* Set chain bit for isoc rings on AMD 0.96 host */
114 if (xhci_link_trb_quirk(xhci) ||
Andiry Xu3b72fca2012-03-05 17:49:32 +0800115 (type == TYPE_ISOC &&
116 (xhci->quirks & XHCI_AMD_0x96_HOST)))
Sarah Sharpb0567b32009-08-07 14:04:36 -0700117 val |= TRB_CHAIN;
Matt Evans28ccd292011-03-29 13:40:46 +1100118 prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700119 }
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700120}
121
Andiry Xu8dfec612012-03-05 17:49:37 +0800122/*
123 * Link the ring to the new segments.
124 * Set Toggle Cycle for the new ring if needed.
125 */
126static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
127 struct xhci_segment *first, struct xhci_segment *last,
128 unsigned int num_segs)
129{
130 struct xhci_segment *next;
131
132 if (!ring || !first || !last)
133 return;
134
135 next = ring->enq_seg->next;
136 xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
137 xhci_link_segments(xhci, last, next, ring->type);
138 ring->num_segs += num_segs;
139 ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
140
141 if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
142 ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
143 &= ~cpu_to_le32(LINK_TOGGLE);
144 last->trbs[TRBS_PER_SEGMENT-1].link.control
145 |= cpu_to_le32(LINK_TOGGLE);
146 ring->last_seg = last;
147 }
148}
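
/*
 * Rough illustration of the splice above (hypothetical segment names, not
 * from the spec): if the ring is A -> B -> C -> A and the enqueue pointer
 * sits in B, newly allocated segments X and Y are linked in as
 * A -> B -> X -> Y -> C -> A.  If B was also the last segment, the Toggle
 * Cycle bit moves from B's link TRB to Y's.
 */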
149
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700150/* XXX: Do we need the hcd structure in all these functions? */
Sarah Sharpf94e01862009-04-27 19:58:38 -0700151void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700152{
Kautuk Consul0e6c7f72011-09-19 16:53:12 -0700153 if (!ring)
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700154 return;
Andiry Xu70d43602012-03-05 17:49:35 +0800155
156 if (ring->first_seg)
157 xhci_free_segments_for_ring(xhci, ring->first_seg);
158
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700159 kfree(ring);
160}
161
Andiry Xu186a7ef2012-03-05 17:49:36 +0800162static void xhci_initialize_ring_info(struct xhci_ring *ring,
163 unsigned int cycle_state)
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800164{
165 /* The ring is empty, so the enqueue pointer == dequeue pointer */
166 ring->enqueue = ring->first_seg->trbs;
167 ring->enq_seg = ring->first_seg;
168 ring->dequeue = ring->enqueue;
169 ring->deq_seg = ring->first_seg;
170 /* The ring is initialized to 0. The producer must write 1 to the cycle
171 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
172 * compare CCS to the cycle bit to check ownership, so CCS = 1.
Andiry Xu186a7ef2012-03-05 17:49:36 +0800173 *
174 * New rings are initialized with cycle state equal to 1; if we are
175 * handling ring expansion, set the cycle state equal to the old ring.
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800176 */
Andiry Xu186a7ef2012-03-05 17:49:36 +0800177 ring->cycle_state = cycle_state;
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800178 /* Not necessary for new rings, but needed for re-initialized rings */
179 ring->enq_updates = 0;
180 ring->deq_updates = 0;
Andiry Xub008df62012-03-05 17:49:34 +0800181
182 /*
183 * Each segment has a link TRB, and we leave one extra TRB for SW
184 * accounting purposes.
185 */
186 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800187}
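
/*
 * Worked example of the accounting above, assuming the usual
 * TRBS_PER_SEGMENT of 64: a freshly initialized two-segment ring has
 * 2 * (64 - 1) - 1 = 125 TRBs available, since the last TRB of each
 * segment is a link TRB and one more TRB is held back for software
 * bookkeeping.
 */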
188
Andiry Xu70d43602012-03-05 17:49:35 +0800189/* Allocate segments and link them for a ring */
190static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
191 struct xhci_segment **first, struct xhci_segment **last,
Andiry Xu186a7ef2012-03-05 17:49:36 +0800192 unsigned int num_segs, unsigned int cycle_state,
193 enum xhci_ring_type type, gfp_t flags)
Andiry Xu70d43602012-03-05 17:49:35 +0800194{
195 struct xhci_segment *prev;
196
Andiry Xu186a7ef2012-03-05 17:49:36 +0800197 prev = xhci_segment_alloc(xhci, cycle_state, flags);
Andiry Xu70d43602012-03-05 17:49:35 +0800198 if (!prev)
199 return -ENOMEM;
200 num_segs--;
201
202 *first = prev;
203 while (num_segs > 0) {
204 struct xhci_segment *next;
205
Andiry Xu186a7ef2012-03-05 17:49:36 +0800206 next = xhci_segment_alloc(xhci, cycle_state, flags);
Andiry Xu70d43602012-03-05 17:49:35 +0800207 if (!next) {
Julius Werner68e52542012-11-01 12:47:59 -0700208 prev = *first;
209 while (prev) {
210 next = prev->next;
211 xhci_segment_free(xhci, prev);
212 prev = next;
213 }
Andiry Xu70d43602012-03-05 17:49:35 +0800214 return -ENOMEM;
215 }
216 xhci_link_segments(xhci, prev, next, type);
217
218 prev = next;
219 num_segs--;
220 }
221 xhci_link_segments(xhci, prev, *first, type);
222 *last = prev;
223
224 return 0;
225}
226
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700227/**
228 * Create a new ring with zero or more segments.
229 *
230 * Link each segment together into a ring.
231 * Set the end flag and the cycle toggle bit on the last segment.
232 * See section 4.9.1 and figures 15 and 16.
233 */
234static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
Andiry Xu186a7ef2012-03-05 17:49:36 +0800235 unsigned int num_segs, unsigned int cycle_state,
236 enum xhci_ring_type type, gfp_t flags)
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700237{
238 struct xhci_ring *ring;
Andiry Xu70d43602012-03-05 17:49:35 +0800239 int ret;
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700240
241 ring = kzalloc(sizeof *(ring), flags);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700242 if (!ring)
Randy Dunlap326b4812010-04-19 08:53:50 -0700243 return NULL;
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700244
Andiry Xu3fe4fe02012-03-05 17:49:33 +0800245 ring->num_segs = num_segs;
Sarah Sharpd0e96f52009-04-27 19:58:01 -0700246 INIT_LIST_HEAD(&ring->td_list);
Andiry Xu3b72fca2012-03-05 17:49:32 +0800247 ring->type = type;
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700248 if (num_segs == 0)
249 return ring;
250
Andiry Xu70d43602012-03-05 17:49:35 +0800251 ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
Andiry Xu186a7ef2012-03-05 17:49:36 +0800252 &ring->last_seg, num_segs, cycle_state, type, flags);
Andiry Xu70d43602012-03-05 17:49:35 +0800253 if (ret)
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700254 goto fail;
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700255
Andiry Xu3b72fca2012-03-05 17:49:32 +0800256 /* Only event ring does not use link TRB */
257 if (type != TYPE_EVENT) {
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700258 /* See section 4.9.2.1 and 6.4.4.1 */
Andiry Xu70d43602012-03-05 17:49:35 +0800259 ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
Matt Evansf5960b62011-06-01 10:22:55 +1000260 cpu_to_le32(LINK_TOGGLE);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700261 }
Andiry Xu186a7ef2012-03-05 17:49:36 +0800262 xhci_initialize_ring_info(ring, cycle_state);
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700263 return ring;
264
265fail:
Julius Werner68e52542012-11-01 12:47:59 -0700266 kfree(ring);
Randy Dunlap326b4812010-04-19 08:53:50 -0700267 return NULL;
Sarah Sharp0ebbab32009-04-27 19:52:34 -0700268}
269
Sarah Sharp412566b2009-12-09 15:59:01 -0800270void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
271 struct xhci_virt_device *virt_dev,
272 unsigned int ep_index)
273{
274 int rings_cached;
275
276 rings_cached = virt_dev->num_rings_cached;
277 if (rings_cached < XHCI_MAX_RINGS_CACHED) {
Sarah Sharp412566b2009-12-09 15:59:01 -0800278 virt_dev->ring_cache[rings_cached] =
279 virt_dev->eps[ep_index].ring;
Sarah Sharp30f89ca2011-05-16 13:09:08 -0700280 virt_dev->num_rings_cached++;
Sarah Sharp412566b2009-12-09 15:59:01 -0800281 xhci_dbg(xhci, "Cached old ring, "
282 "%d ring%s cached\n",
Sarah Sharp30f89ca2011-05-16 13:09:08 -0700283 virt_dev->num_rings_cached,
284 (virt_dev->num_rings_cached > 1) ? "s" : "");
Sarah Sharp412566b2009-12-09 15:59:01 -0800285 } else {
286 xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
287 xhci_dbg(xhci, "Ring cache full (%d rings), "
288 "freeing ring\n",
289 virt_dev->num_rings_cached);
290 }
291 virt_dev->eps[ep_index].ring = NULL;
292}
293
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800294/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
295 * pointers to the beginning of the ring.
296 */
297static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
Andiry Xu186a7ef2012-03-05 17:49:36 +0800298 struct xhci_ring *ring, unsigned int cycle_state,
299 enum xhci_ring_type type)
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800300{
301 struct xhci_segment *seg = ring->first_seg;
Andiry Xu186a7ef2012-03-05 17:49:36 +0800302 int i;
303
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800304 do {
305 memset(seg->trbs, 0,
306 sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
Andiry Xu186a7ef2012-03-05 17:49:36 +0800307 if (cycle_state == 0) {
308 for (i = 0; i < TRBS_PER_SEGMENT; i++)
309 seg->trbs[i].link.control |= TRB_CYCLE;
310 }
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800311 /* All endpoint rings have link TRBs */
Andiry Xu3b72fca2012-03-05 17:49:32 +0800312 xhci_link_segments(xhci, seg, seg->next, type);
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800313 seg = seg->next;
314 } while (seg != ring->first_seg);
Andiry Xu3b72fca2012-03-05 17:49:32 +0800315 ring->type = type;
Andiry Xu186a7ef2012-03-05 17:49:36 +0800316 xhci_initialize_ring_info(ring, cycle_state);
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800317 /* td list should be empty since all URBs have been cancelled,
318 * but just in case...
319 */
320 INIT_LIST_HEAD(&ring->td_list);
321}
322
Andiry Xu8dfec612012-03-05 17:49:37 +0800323/*
324 * Expand an existing ring.
325 * Look for a cached ring or allocate a new ring which has same segment numbers
326 * and link the two rings.
327 */
328int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
329 unsigned int num_trbs, gfp_t flags)
330{
331 struct xhci_segment *first;
332 struct xhci_segment *last;
333 unsigned int num_segs;
334 unsigned int num_segs_needed;
335 int ret;
336
337 num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
338 (TRBS_PER_SEGMENT - 1);
339
340 /* Allocate number of segments we needed, or double the ring size */
341 num_segs = ring->num_segs > num_segs_needed ?
342 ring->num_segs : num_segs_needed;
343
344 ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
345 num_segs, ring->cycle_state, ring->type, flags);
346 if (ret)
347 return -ENOMEM;
348
349 xhci_link_rings(xhci, ring, first, last, num_segs);
350 xhci_dbg(xhci, "ring expansion succeeded, now has %d segments\n",
351 ring->num_segs);
352
353 return 0;
354}
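
/*
 * Example of the sizing math above (hypothetical numbers, assuming
 * TRBS_PER_SEGMENT is 64): a request for 275 more TRBs needs
 * DIV_ROUND_UP(275, 63) = 5 segments, but if the ring already has 8
 * segments, 8 new segments are allocated instead, doubling the ring.
 */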
355
John Yound115b042009-07-27 12:05:15 -0700356#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
357
Randy Dunlap326b4812010-04-19 08:53:50 -0700358static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
John Yound115b042009-07-27 12:05:15 -0700359 int type, gfp_t flags)
360{
361 struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
362 if (!ctx)
363 return NULL;
364
365 BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
366 ctx->type = type;
367 ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
368 if (type == XHCI_CTX_TYPE_INPUT)
369 ctx->size += CTX_SIZE(xhci->hcc_params);
370
371	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	/* dma_pool_alloc() can fail; don't memset() a NULL buffer */
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
372	memset(ctx->bytes, 0, ctx->size);
373 return ctx;
374}
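
/*
 * Sizing example for the contexts above (illustrative only): on a host
 * with 64-byte contexts the output (device) context is 2048 bytes, while
 * an input context is one entry larger, 2048 + 64 = 2112 bytes, to make
 * room for the input control context ahead of the slot and endpoint
 * contexts.
 */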
375
Randy Dunlap326b4812010-04-19 08:53:50 -0700376static void xhci_free_container_ctx(struct xhci_hcd *xhci,
John Yound115b042009-07-27 12:05:15 -0700377 struct xhci_container_ctx *ctx)
378{
Sarah Sharpa1d78c12009-12-09 15:59:03 -0800379 if (!ctx)
380 return;
John Yound115b042009-07-27 12:05:15 -0700381 dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
382 kfree(ctx);
383}
384
385struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
386 struct xhci_container_ctx *ctx)
387{
388 BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
389 return (struct xhci_input_control_ctx *)ctx->bytes;
390}
391
392struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
393 struct xhci_container_ctx *ctx)
394{
395 if (ctx->type == XHCI_CTX_TYPE_DEVICE)
396 return (struct xhci_slot_ctx *)ctx->bytes;
397
398 return (struct xhci_slot_ctx *)
399 (ctx->bytes + CTX_SIZE(xhci->hcc_params));
400}
401
402struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
403 struct xhci_container_ctx *ctx,
404 unsigned int ep_index)
405{
406 /* increment ep index by offset of start of ep ctx array */
407 ep_index++;
408 if (ctx->type == XHCI_CTX_TYPE_INPUT)
409 ep_index++;
410
411 return (struct xhci_ep_ctx *)
412 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
413}
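
/*
 * Offset example for the lookup above (illustrative, assuming 32-byte
 * contexts): in an output context, ep_index 0 (endpoint 0) resolves to
 * byte offset 32, right after the slot context; in an input context it
 * resolves to byte offset 64, because the input control context occupies
 * entry 0.
 */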
414
Sarah Sharp8df75f42010-04-02 15:34:16 -0700415
416/***************** Streams structures manipulation *************************/
417
Dmitry Torokhov8212a492011-02-08 13:55:59 -0800418static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
Sarah Sharp8df75f42010-04-02 15:34:16 -0700419 unsigned int num_stream_ctxs,
420 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
421{
422 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
423
424 if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -0700425 dma_free_coherent(&pdev->dev,
Sarah Sharp8df75f42010-04-02 15:34:16 -0700426 sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
427 stream_ctx, dma);
428 else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
429 return dma_pool_free(xhci->small_streams_pool,
430 stream_ctx, dma);
431 else
432 return dma_pool_free(xhci->medium_streams_pool,
433 stream_ctx, dma);
434}
435
436/*
437 * The stream context array for each endpoint with bulk streams enabled can
438 * vary in size, based on:
439 * - how many streams the endpoint supports,
440 * - the maximum primary stream array size the host controller supports,
441 * - and how many streams the device driver asks for.
442 *
443 * The stream context array must be a power of 2, and can be as small as
444 * 64 bytes or as large as 1MB.
445 */
Dmitry Torokhov8212a492011-02-08 13:55:59 -0800446static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
Sarah Sharp8df75f42010-04-02 15:34:16 -0700447 unsigned int num_stream_ctxs, dma_addr_t *dma,
448 gfp_t mem_flags)
449{
450 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
451
452 if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -0700453 return dma_alloc_coherent(&pdev->dev,
Sarah Sharp8df75f42010-04-02 15:34:16 -0700454 sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -0700455 dma, mem_flags);
Sarah Sharp8df75f42010-04-02 15:34:16 -0700456 else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
457 return dma_pool_alloc(xhci->small_streams_pool,
458 mem_flags, dma);
459 else
460 return dma_pool_alloc(xhci->medium_streams_pool,
461 mem_flags, dma);
462}
463
Sarah Sharpe9df17e2010-04-02 15:34:43 -0700464struct xhci_ring *xhci_dma_to_transfer_ring(
465 struct xhci_virt_ep *ep,
466 u64 address)
467{
468 if (ep->ep_state & EP_HAS_STREAMS)
469 return radix_tree_lookup(&ep->stream_info->trb_address_map,
David Howellseb8ccd22013-03-28 18:48:35 +0000470 address >> TRB_SEGMENT_SHIFT);
Sarah Sharpe9df17e2010-04-02 15:34:43 -0700471 return ep->ring;
472}
473
474/* Only use this when you know stream_info is valid */
Sarah Sharp8df75f42010-04-02 15:34:16 -0700475#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
Sarah Sharpe9df17e2010-04-02 15:34:43 -0700476static struct xhci_ring *dma_to_stream_ring(
Sarah Sharp8df75f42010-04-02 15:34:16 -0700477 struct xhci_stream_info *stream_info,
478 u64 address)
479{
480 return radix_tree_lookup(&stream_info->trb_address_map,
David Howellseb8ccd22013-03-28 18:48:35 +0000481 address >> TRB_SEGMENT_SHIFT);
Sarah Sharp8df75f42010-04-02 15:34:16 -0700482}
483#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
484
Sarah Sharpe9df17e2010-04-02 15:34:43 -0700485struct xhci_ring *xhci_stream_id_to_ring(
486 struct xhci_virt_device *dev,
487 unsigned int ep_index,
488 unsigned int stream_id)
489{
490 struct xhci_virt_ep *ep = &dev->eps[ep_index];
491
492 if (stream_id == 0)
493 return ep->ring;
494 if (!ep->stream_info)
495 return NULL;
496
497 if (stream_id > ep->stream_info->num_streams)
498 return NULL;
499 return ep->stream_info->stream_rings[stream_id];
500}
501
Sarah Sharp8df75f42010-04-02 15:34:16 -0700502#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
503static int xhci_test_radix_tree(struct xhci_hcd *xhci,
504 unsigned int num_streams,
505 struct xhci_stream_info *stream_info)
506{
507 u32 cur_stream;
508 struct xhci_ring *cur_ring;
509 u64 addr;
510
511 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
512 struct xhci_ring *mapped_ring;
513 int trb_size = sizeof(union xhci_trb);
514
515 cur_ring = stream_info->stream_rings[cur_stream];
516 for (addr = cur_ring->first_seg->dma;
David Howellseb8ccd22013-03-28 18:48:35 +0000517 addr < cur_ring->first_seg->dma + TRB_SEGMENT_SIZE;
Sarah Sharp8df75f42010-04-02 15:34:16 -0700518 addr += trb_size) {
519 mapped_ring = dma_to_stream_ring(stream_info, addr);
520 if (cur_ring != mapped_ring) {
521 xhci_warn(xhci, "WARN: DMA address 0x%08llx "
522 "didn't map to stream ID %u; "
523 "mapped to ring %p\n",
524 (unsigned long long) addr,
525 cur_stream,
526 mapped_ring);
527 return -EINVAL;
528 }
529 }
530 /* One TRB after the end of the ring segment shouldn't return a
531 * pointer to the current ring (although it may be a part of a
532 * different ring).
533 */
534 mapped_ring = dma_to_stream_ring(stream_info, addr);
535 if (mapped_ring != cur_ring) {
536 /* One TRB before should also fail */
537 addr = cur_ring->first_seg->dma - trb_size;
538 mapped_ring = dma_to_stream_ring(stream_info, addr);
539 }
540 if (mapped_ring == cur_ring) {
541 xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
542 "mapped to valid stream ID %u; "
543 "mapped ring = %p\n",
544 (unsigned long long) addr,
545 cur_stream,
546 mapped_ring);
547 return -EINVAL;
548 }
549 }
550 return 0;
551}
552#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
553
554/*
555 * Change an endpoint's internal structure so it supports stream IDs. The
556 * number of requested streams includes stream 0, which cannot be used by device
557 * drivers.
558 *
559 * The number of stream contexts in the stream context array may be bigger than
560 * the number of streams the driver wants to use. This is because the number of
561 * stream context array entries must be a power of two.
562 *
563 * We need a radix tree for mapping physical addresses of TRBs to which stream
564 * ID they belong to. We need to do this because the host controller won't tell
565 * us which stream ring the TRB came from. We could store the stream ID in an
566 * event data TRB, but that doesn't help us for the cancellation case, since the
567 * endpoint may stop before it reaches that event data TRB.
568 *
569 * The radix tree maps the upper portion of the TRB DMA address to a ring
570 * segment that has the same upper portion of DMA addresses. For example, say I
571 * have segments of size 1KB, that are always 64-byte aligned. A segment may
572 * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
573 * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
574 * pass the radix tree a key to get the right stream ID:
575 *
576 * 0x10c90fff >> 10 = 0x43243
577 * 0x10c912c0 >> 10 = 0x43244
578 * 0x10c91400 >> 10 = 0x43245
579 *
580 * Obviously, only those TRBs with DMA addresses that are within the segment
581 * will make the radix tree return the stream ID for that ring.
582 *
583 * Caveats for the radix tree:
584 *
585 * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
586 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
587 * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
588 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
589 * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
590 * extended systems (where the DMA address can be bigger than 32-bits),
591 * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
592 */
593struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
594 unsigned int num_stream_ctxs,
595 unsigned int num_streams, gfp_t mem_flags)
596{
597 struct xhci_stream_info *stream_info;
598 u32 cur_stream;
599 struct xhci_ring *cur_ring;
600 unsigned long key;
601 u64 addr;
602 int ret;
603
604 xhci_dbg(xhci, "Allocating %u streams and %u "
605 "stream context array entries.\n",
606 num_streams, num_stream_ctxs);
607 if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
608 xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
609 return NULL;
610 }
611 xhci->cmd_ring_reserved_trbs++;
612
613 stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
614 if (!stream_info)
615 goto cleanup_trbs;
616
617 stream_info->num_streams = num_streams;
618 stream_info->num_stream_ctxs = num_stream_ctxs;
619
620 /* Initialize the array of virtual pointers to stream rings. */
621 stream_info->stream_rings = kzalloc(
622 sizeof(struct xhci_ring *)*num_streams,
623 mem_flags);
624 if (!stream_info->stream_rings)
625 goto cleanup_info;
626
627 /* Initialize the array of DMA addresses for stream rings for the HW. */
628 stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
629 num_stream_ctxs, &stream_info->ctx_array_dma,
630 mem_flags);
631 if (!stream_info->stream_ctx_array)
632 goto cleanup_ctx;
633 memset(stream_info->stream_ctx_array, 0,
634 sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
635
636 /* Allocate everything needed to free the stream rings later */
637 stream_info->free_streams_command =
638 xhci_alloc_command(xhci, true, true, mem_flags);
639 if (!stream_info->free_streams_command)
640 goto cleanup_ctx;
641
642 INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
643
644 /* Allocate rings for all the streams that the driver will use,
645 * and add their segment DMA addresses to the radix tree.
646 * Stream 0 is reserved.
647 */
648 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
649 stream_info->stream_rings[cur_stream] =
Andiry Xu2fdcd472012-03-05 17:49:39 +0800650 xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
Sarah Sharp8df75f42010-04-02 15:34:16 -0700651 cur_ring = stream_info->stream_rings[cur_stream];
652 if (!cur_ring)
653 goto cleanup_rings;
Sarah Sharpe9df17e2010-04-02 15:34:43 -0700654 cur_ring->stream_id = cur_stream;
Sarah Sharp8df75f42010-04-02 15:34:16 -0700655 /* Set deq ptr, cycle bit, and stream context type */
656 addr = cur_ring->first_seg->dma |
657 SCT_FOR_CTX(SCT_PRI_TR) |
658 cur_ring->cycle_state;
Matt Evansf5960b62011-06-01 10:22:55 +1000659 stream_info->stream_ctx_array[cur_stream].stream_ring =
660 cpu_to_le64(addr);
Sarah Sharp8df75f42010-04-02 15:34:16 -0700661 xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
662 cur_stream, (unsigned long long) addr);
663
664 key = (unsigned long)
David Howellseb8ccd22013-03-28 18:48:35 +0000665 (cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
Sarah Sharp8df75f42010-04-02 15:34:16 -0700666 ret = radix_tree_insert(&stream_info->trb_address_map,
667 key, cur_ring);
668 if (ret) {
669 xhci_ring_free(xhci, cur_ring);
670 stream_info->stream_rings[cur_stream] = NULL;
671 goto cleanup_rings;
672 }
673 }
674 /* Leave the other unused stream ring pointers in the stream context
675 * array initialized to zero. This will cause the xHC to give us an
676 * error if the device asks for a stream ID we don't have setup (if it
677 * was any other way, the host controller would assume the ring is
678 * "empty" and wait forever for data to be queued to that stream ID).
679 */
680#if XHCI_DEBUG
681 /* Do a little test on the radix tree to make sure it returns the
682 * correct values.
683 */
684 if (xhci_test_radix_tree(xhci, num_streams, stream_info))
685 goto cleanup_rings;
686#endif
687
688 return stream_info;
689
690cleanup_rings:
691 for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
692 cur_ring = stream_info->stream_rings[cur_stream];
693 if (cur_ring) {
694 addr = cur_ring->first_seg->dma;
695 radix_tree_delete(&stream_info->trb_address_map,
David Howellseb8ccd22013-03-28 18:48:35 +0000696 addr >> TRB_SEGMENT_SHIFT);
Sarah Sharp8df75f42010-04-02 15:34:16 -0700697 xhci_ring_free(xhci, cur_ring);
698 stream_info->stream_rings[cur_stream] = NULL;
699 }
700 }
701 xhci_free_command(xhci, stream_info->free_streams_command);
702cleanup_ctx:
703 kfree(stream_info->stream_rings);
704cleanup_info:
705 kfree(stream_info);
706cleanup_trbs:
707 xhci->cmd_ring_reserved_trbs--;
708 return NULL;
709}
710/*
711 * Sets the MaxPStreams field and the Linear Stream Array field.
712 * Sets the dequeue pointer to the stream context array.
713 */
714void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
715 struct xhci_ep_ctx *ep_ctx,
716 struct xhci_stream_info *stream_info)
717{
718 u32 max_primary_streams;
719 /* MaxPStreams is the number of stream context array entries, not the
720 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
721 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
722 */
723 max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
724 xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
725 1 << (max_primary_streams + 1));
Matt Evans28ccd292011-03-29 13:40:46 +1100726 ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
727 ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
728 | EP_HAS_LSA);
729 ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
Sarah Sharp8df75f42010-04-02 15:34:16 -0700730}
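
/*
 * Worked example of the MaxPStreams encoding above (hypothetical numbers):
 * with 256 stream context array entries, fls(256) = 9, so MaxPStreams is
 * set to 7 and the host decodes 2^(7 + 1) = 256 entries.
 */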
731
732/*
733 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
734 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
735 * not at the beginning of the ring).
736 */
737void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
738 struct xhci_ep_ctx *ep_ctx,
739 struct xhci_virt_ep *ep)
740{
741 dma_addr_t addr;
Matt Evans28ccd292011-03-29 13:40:46 +1100742 ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
Sarah Sharp8df75f42010-04-02 15:34:16 -0700743 addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
Matt Evans28ccd292011-03-29 13:40:46 +1100744 ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
Sarah Sharp8df75f42010-04-02 15:34:16 -0700745}
746
747/* Frees all stream contexts associated with the endpoint,
748 *
749 * Caller should fix the endpoint context streams fields.
750 */
751void xhci_free_stream_info(struct xhci_hcd *xhci,
752 struct xhci_stream_info *stream_info)
753{
754 int cur_stream;
755 struct xhci_ring *cur_ring;
756 dma_addr_t addr;
757
758 if (!stream_info)
759 return;
760
761 for (cur_stream = 1; cur_stream < stream_info->num_streams;
762 cur_stream++) {
763 cur_ring = stream_info->stream_rings[cur_stream];
764 if (cur_ring) {
765 addr = cur_ring->first_seg->dma;
766 radix_tree_delete(&stream_info->trb_address_map,
David Howellseb8ccd22013-03-28 18:48:35 +0000767 addr >> TRB_SEGMENT_SHIFT);
Sarah Sharp8df75f42010-04-02 15:34:16 -0700768 xhci_ring_free(xhci, cur_ring);
769 stream_info->stream_rings[cur_stream] = NULL;
770 }
771 }
772 xhci_free_command(xhci, stream_info->free_streams_command);
773 xhci->cmd_ring_reserved_trbs--;
774 if (stream_info->stream_ctx_array)
775 xhci_free_stream_ctx(xhci,
776 stream_info->num_stream_ctxs,
777 stream_info->stream_ctx_array,
778 stream_info->ctx_array_dma);
779
780 if (stream_info)
781 kfree(stream_info->stream_rings);
782 kfree(stream_info);
783}
784
785
786/***************** Device context manipulation *************************/
787
Sarah Sharp6f5165c2009-10-27 10:57:01 -0700788static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
789 struct xhci_virt_ep *ep)
790{
791 init_timer(&ep->stop_cmd_timer);
792 ep->stop_cmd_timer.data = (unsigned long) ep;
793 ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
794 ep->xhci = xhci;
795}
796
Sarah Sharp839c8172011-09-02 11:05:47 -0700797static void xhci_free_tt_info(struct xhci_hcd *xhci,
798 struct xhci_virt_device *virt_dev,
799 int slot_id)
800{
Sarah Sharp839c8172011-09-02 11:05:47 -0700801 struct list_head *tt_list_head;
Takashi Iwai46ed8f02012-06-01 10:06:23 +0200802 struct xhci_tt_bw_info *tt_info, *next;
803 bool slot_found = false;
Sarah Sharp839c8172011-09-02 11:05:47 -0700804
805 /* If the device never made it past the Set Address stage,
806 * it may not have the real_port set correctly.
807 */
808 if (virt_dev->real_port == 0 ||
809 virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
810 xhci_dbg(xhci, "Bad real port.\n");
811 return;
812 }
813
814 tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
Takashi Iwai46ed8f02012-06-01 10:06:23 +0200815 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
816 /* Multi-TT hubs will have more than one entry */
817 if (tt_info->slot_id == slot_id) {
818 slot_found = true;
819 list_del(&tt_info->tt_list);
820 kfree(tt_info);
821 } else if (slot_found) {
Sarah Sharp839c8172011-09-02 11:05:47 -0700822 break;
Takashi Iwai46ed8f02012-06-01 10:06:23 +0200823 }
Sarah Sharp839c8172011-09-02 11:05:47 -0700824 }
Sarah Sharp839c8172011-09-02 11:05:47 -0700825}
826
827int xhci_alloc_tt_info(struct xhci_hcd *xhci,
828 struct xhci_virt_device *virt_dev,
829 struct usb_device *hdev,
830 struct usb_tt *tt, gfp_t mem_flags)
831{
832 struct xhci_tt_bw_info *tt_info;
833 unsigned int num_ports;
834 int i, j;
835
836 if (!tt->multi)
837 num_ports = 1;
838 else
839 num_ports = hdev->maxchild;
840
841 for (i = 0; i < num_ports; i++, tt_info++) {
842 struct xhci_interval_bw_table *bw_table;
843
844 tt_info = kzalloc(sizeof(*tt_info), mem_flags);
845 if (!tt_info)
846 goto free_tts;
847 INIT_LIST_HEAD(&tt_info->tt_list);
848 list_add(&tt_info->tt_list,
849 &xhci->rh_bw[virt_dev->real_port - 1].tts);
850 tt_info->slot_id = virt_dev->udev->slot_id;
851 if (tt->multi)
852 tt_info->ttport = i+1;
853 bw_table = &tt_info->bw_table;
854 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
855 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
856 }
857 return 0;
858
859free_tts:
860 xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
861 return -ENOMEM;
862}
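
/*
 * Illustration of the allocation above (hypothetical hub): a four-port
 * multi-TT hub gets four xhci_tt_bw_info entries, one per port
 * (ttport 1..4), while a single-TT hub gets one entry shared by all of
 * its downstream ports.
 */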
863
864
865/* All the xhci_tds in the ring's TD list should be freed at this point.
866 * Should be called with xhci->lock held if there is any chance the TT lists
867 * will be manipulated by the configure endpoint, allocate device, or update
868 * hub functions while this function is removing the TT entries from the list.
869 */
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700870void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
871{
872 struct xhci_virt_device *dev;
873 int i;
Sarah Sharp2e279802011-09-02 11:05:50 -0700874 int old_active_eps = 0;
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700875
876 /* Slot ID 0 is reserved */
877 if (slot_id == 0 || !xhci->devs[slot_id])
878 return;
879
880 dev = xhci->devs[slot_id];
Sarah Sharp8e595a52009-07-27 12:03:31 -0700881 xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700882 if (!dev)
883 return;
884
Sarah Sharp2e279802011-09-02 11:05:50 -0700885 if (dev->tt_info)
886 old_active_eps = dev->tt_info->active_eps;
887
Sarah Sharp8df75f42010-04-02 15:34:16 -0700888 for (i = 0; i < 31; ++i) {
Sarah Sharp63a0d9a2009-09-04 10:53:09 -0700889 if (dev->eps[i].ring)
890 xhci_ring_free(xhci, dev->eps[i].ring);
Sarah Sharp8df75f42010-04-02 15:34:16 -0700891 if (dev->eps[i].stream_info)
892 xhci_free_stream_info(xhci,
893 dev->eps[i].stream_info);
Sarah Sharp2e279802011-09-02 11:05:50 -0700894 /* Endpoints on the TT/root port lists should have been removed
895 * when usb_disable_device() was called for the device.
896 * We can't drop them anyway, because the udev might have gone
897 * away by this point, and we can't tell what speed it was.
898 */
899 if (!list_empty(&dev->eps[i].bw_endpoint_list))
900 xhci_warn(xhci, "Slot %u endpoint %u "
901 "not removed from BW list!\n",
902 slot_id, i);
Sarah Sharp8df75f42010-04-02 15:34:16 -0700903 }
Sarah Sharp839c8172011-09-02 11:05:47 -0700904 /* If this is a hub, free the TT(s) from the TT list */
905 xhci_free_tt_info(xhci, dev, slot_id);
Sarah Sharp2e279802011-09-02 11:05:50 -0700906 /* If necessary, update the number of active TTs on this root port */
907 xhci_update_tt_active_eps(xhci, dev, old_active_eps);
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700908
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800909 if (dev->ring_cache) {
910 for (i = 0; i < dev->num_rings_cached; i++)
911 xhci_ring_free(xhci, dev->ring_cache[i]);
912 kfree(dev->ring_cache);
913 }
914
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700915 if (dev->in_ctx)
John Yound115b042009-07-27 12:05:15 -0700916 xhci_free_container_ctx(xhci, dev->in_ctx);
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700917 if (dev->out_ctx)
John Yound115b042009-07-27 12:05:15 -0700918 xhci_free_container_ctx(xhci, dev->out_ctx);
919
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700920 kfree(xhci->devs[slot_id]);
Randy Dunlap326b4812010-04-19 08:53:50 -0700921 xhci->devs[slot_id] = NULL;
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700922}
923
924int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
925 struct usb_device *udev, gfp_t flags)
926{
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700927 struct xhci_virt_device *dev;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -0700928 int i;
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700929
930 /* Slot ID 0 is reserved */
931 if (slot_id == 0 || xhci->devs[slot_id]) {
932 xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
933 return 0;
934 }
935
936 xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
937 if (!xhci->devs[slot_id])
938 return 0;
939 dev = xhci->devs[slot_id];
940
John Yound115b042009-07-27 12:05:15 -0700941 /* Allocate the (output) device context that will be used in the HC. */
942 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700943 if (!dev->out_ctx)
944 goto fail;
John Yound115b042009-07-27 12:05:15 -0700945
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -0700946 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
John Yound115b042009-07-27 12:05:15 -0700947 (unsigned long long)dev->out_ctx->dma);
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700948
949 /* Allocate the (input) device context for address device command */
John Yound115b042009-07-27 12:05:15 -0700950 dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700951 if (!dev->in_ctx)
952 goto fail;
John Yound115b042009-07-27 12:05:15 -0700953
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -0700954 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
John Yound115b042009-07-27 12:05:15 -0700955 (unsigned long long)dev->in_ctx->dma);
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700956
Sarah Sharp6f5165c2009-10-27 10:57:01 -0700957 /* Initialize the cancellation list and watchdog timers for each ep */
958 for (i = 0; i < 31; i++) {
959 xhci_init_endpoint_timer(xhci, &dev->eps[i]);
Sarah Sharp63a0d9a2009-09-04 10:53:09 -0700960 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
Sarah Sharp2e279802011-09-02 11:05:50 -0700961 INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
Sarah Sharp6f5165c2009-10-27 10:57:01 -0700962 }
Sarah Sharp63a0d9a2009-09-04 10:53:09 -0700963
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700964 /* Allocate endpoint 0 ring */
Andiry Xu2fdcd472012-03-05 17:49:39 +0800965 dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
Sarah Sharp63a0d9a2009-09-04 10:53:09 -0700966 if (!dev->eps[0].ring)
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700967 goto fail;
968
Sarah Sharp74f9fe22009-12-03 09:44:29 -0800969 /* Allocate pointers to the ring cache */
970 dev->ring_cache = kzalloc(
971 sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
972 flags);
973 if (!dev->ring_cache)
974 goto fail;
975 dev->num_rings_cached = 0;
976
Sarah Sharpf94e01862009-04-27 19:58:38 -0700977 init_completion(&dev->cmd_completion);
Sarah Sharp913a8a32009-09-04 10:53:13 -0700978 INIT_LIST_HEAD(&dev->cmd_list);
Andiry Xu64927732010-10-14 07:22:45 -0700979 dev->udev = udev;
Sarah Sharpf94e01862009-04-27 19:58:38 -0700980
Sarah Sharp28c2d2e2009-07-27 12:05:08 -0700981 /* Point to output device context in dcbaa. */
Matt Evans28ccd292011-03-29 13:40:46 +1100982 xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -0700983 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
Matt Evans28ccd292011-03-29 13:40:46 +1100984 slot_id,
985 &xhci->dcbaa->dev_context_ptrs[slot_id],
Matt Evansf5960b62011-06-01 10:22:55 +1000986 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
Sarah Sharp3ffbba92009-04-27 19:57:38 -0700987
988 return 1;
989fail:
990 xhci_free_virt_device(xhci, slot_id);
991 return 0;
992}
993
Sarah Sharp2d1ee592010-07-09 17:08:54 +0200994void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
995 struct usb_device *udev)
996{
997 struct xhci_virt_device *virt_dev;
998 struct xhci_ep_ctx *ep0_ctx;
999 struct xhci_ring *ep_ring;
1000
1001 virt_dev = xhci->devs[udev->slot_id];
1002 ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
1003 ep_ring = virt_dev->eps[0].ring;
1004 /*
1005 * FIXME we don't keep track of the dequeue pointer very well after a
1006 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
1007 * host to our enqueue pointer. This should only be called after a
1008 * configured device has reset, so all control transfers should have
1009 * been completed or cancelled before the reset.
1010 */
Matt Evans28ccd292011-03-29 13:40:46 +11001011 ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
1012 ep_ring->enqueue)
1013 | ep_ring->cycle_state);
Sarah Sharp2d1ee592010-07-09 17:08:54 +02001014}
1015
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001016/*
1017 * The xHCI roothub may have ports of differing speeds in any order in the port
1018 * status registers. xhci->port_array provides an array of the port speed for
1019 * each offset into the port status registers.
1020 *
1021 * The xHCI hardware wants to know the roothub port number that the USB device
1022 * is attached to (or the roothub port its ancestor hub is attached to). All we
1023 * know is the index of that port under either the USB 2.0 or the USB 3.0
1024 * roothub, but that doesn't give us the real index into the HW port status
Lan Tianyu3f5eb142013-03-19 16:48:12 +08001025 * registers. Call xhci_find_raw_port_number() to get real index.
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001026 */
1027static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
1028 struct usb_device *udev)
1029{
1030 struct usb_device *top_dev;
Lan Tianyu3f5eb142013-03-19 16:48:12 +08001031 struct usb_hcd *hcd;
1032
1033 if (udev->speed == USB_SPEED_SUPER)
1034 hcd = xhci->shared_hcd;
1035 else
1036 hcd = xhci->main_hcd;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001037
1038 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1039 top_dev = top_dev->parent)
1040 /* Found device below root hub */;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001041
Lan Tianyu3f5eb142013-03-19 16:48:12 +08001042 return xhci_find_raw_port_number(hcd, top_dev->portnum);
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001043}
1044
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001045/* Setup an xHCI virtual device for a Set Address command */
1046int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
1047{
1048 struct xhci_virt_device *dev;
1049 struct xhci_ep_ctx *ep0_ctx;
John Yound115b042009-07-27 12:05:15 -07001050 struct xhci_slot_ctx *slot_ctx;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001051 u32 port_num;
1052 struct usb_device *top_dev;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001053
1054 dev = xhci->devs[udev->slot_id];
1055 /* Slot ID 0 is reserved */
1056 if (udev->slot_id == 0 || !dev) {
1057 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
1058 udev->slot_id);
1059 return -EINVAL;
1060 }
John Yound115b042009-07-27 12:05:15 -07001061 ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
John Yound115b042009-07-27 12:05:15 -07001062 slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001063
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001064 /* 3) Only the control endpoint is valid - one endpoint context */
Matt Evansf5960b62011-06-01 10:22:55 +10001065 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001066 switch (udev->speed) {
1067 case USB_SPEED_SUPER:
Matt Evansf5960b62011-06-01 10:22:55 +10001068 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001069 break;
1070 case USB_SPEED_HIGH:
Matt Evansf5960b62011-06-01 10:22:55 +10001071 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001072 break;
1073 case USB_SPEED_FULL:
Matt Evansf5960b62011-06-01 10:22:55 +10001074 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001075 break;
1076 case USB_SPEED_LOW:
Matt Evansf5960b62011-06-01 10:22:55 +10001077 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001078 break;
Greg Kroah-Hartman551cdbb2010-01-14 11:08:04 -08001079 case USB_SPEED_WIRELESS:
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001080 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
1081 return -EINVAL;
1082 break;
1083 default:
1084 /* Speed was set earlier, this shouldn't happen. */
1085 BUG();
1086 }
1087 /* Find the root hub port this device is under */
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001088 port_num = xhci_find_real_port_number(xhci, udev);
1089 if (!port_num)
1090 return -EINVAL;
Matt Evansf5960b62011-06-01 10:22:55 +10001091 slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001092 /* Set the port number in the virtual_device to the faked port number */
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001093 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1094 top_dev = top_dev->parent)
1095 /* Found device below root hub */;
Sarah Sharpfe301822011-09-02 11:05:41 -07001096 dev->fake_port = top_dev->portnum;
Sarah Sharp66381752011-09-02 11:05:45 -07001097 dev->real_port = port_num;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001098 xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
Sarah Sharpfe301822011-09-02 11:05:41 -07001099 xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001100
Sarah Sharp839c8172011-09-02 11:05:47 -07001101 /* Find the right bandwidth table that this device will be a part of.
1102 * If this is a full speed device attached directly to a root port (or a
1103 * descendant of one), it counts as a primary bandwidth domain, not a
1104 * secondary bandwidth domain under a TT. An xhci_tt_info structure
1105 * will never be created for the HS root hub.
1106 */
1107 if (!udev->tt || !udev->tt->hub->parent) {
1108 dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
1109 } else {
1110 struct xhci_root_port_bw_info *rh_bw;
1111 struct xhci_tt_bw_info *tt_bw;
1112
1113 rh_bw = &xhci->rh_bw[port_num - 1];
1114 /* Find the right TT. */
1115 list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
1116 if (tt_bw->slot_id != udev->tt->hub->slot_id)
1117 continue;
1118
1119 if (!dev->udev->tt->multi ||
1120 (udev->tt->multi &&
1121 tt_bw->ttport == dev->udev->ttport)) {
1122 dev->bw_table = &tt_bw->bw_table;
1123 dev->tt_info = tt_bw;
1124 break;
1125 }
1126 }
1127 if (!dev->tt_info)
1128 xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
1129 }
1130
Sarah Sharpaa1b13e2011-03-03 05:40:51 -08001131 /* Is this a LS/FS device under an external HS hub? */
1132 if (udev->tt && udev->tt->hub->parent) {
Matt Evans28ccd292011-03-29 13:40:46 +11001133 slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
1134 (udev->ttport << 8));
Sarah Sharp07b6de12009-09-04 10:53:19 -07001135 if (udev->tt->multi)
Matt Evans28ccd292011-03-29 13:40:46 +11001136 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001137 }
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07001138 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001139 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
1140
1141 /* Step 4 - ring already allocated */
1142 /* Step 5 */
Matt Evans28ccd292011-03-29 13:40:46 +11001143 ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001144 /*
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001145 * XXX: Not sure about wireless USB devices.
1146 */
Sarah Sharp47aded82009-08-07 14:04:46 -07001147 switch (udev->speed) {
1148 case USB_SPEED_SUPER:
Matt Evans28ccd292011-03-29 13:40:46 +11001149 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
Sarah Sharp47aded82009-08-07 14:04:46 -07001150 break;
1151 case USB_SPEED_HIGH:
1152 /* USB core guesses at a 64-byte max packet first for FS devices */
1153 case USB_SPEED_FULL:
Matt Evans28ccd292011-03-29 13:40:46 +11001154 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
Sarah Sharp47aded82009-08-07 14:04:46 -07001155 break;
1156 case USB_SPEED_LOW:
Matt Evans28ccd292011-03-29 13:40:46 +11001157 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
Sarah Sharp47aded82009-08-07 14:04:46 -07001158 break;
Greg Kroah-Hartman551cdbb2010-01-14 11:08:04 -08001159 case USB_SPEED_WIRELESS:
Sarah Sharp47aded82009-08-07 14:04:46 -07001160 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
1161 return -EINVAL;
1162 break;
1163 default:
1164 /* New speed? */
1165 BUG();
1166 }
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001167 /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
Matt Evans28ccd292011-03-29 13:40:46 +11001168 ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001169
Matt Evans28ccd292011-03-29 13:40:46 +11001170 ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
1171 dev->eps[0].ring->cycle_state);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001172
1173 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
1174
1175 return 0;
1176}
1177
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001178/*
1179 * Convert interval expressed as 2^(bInterval - 1) == interval into
1180 * straight exponent value 2^n == interval.
1181 *
1182 */
1183static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1184 struct usb_host_endpoint *ep)
1185{
1186 unsigned int interval;
1187
1188 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
1189 if (interval != ep->desc.bInterval - 1)
1190 dev_warn(&udev->dev,
Dmitry Torokhovcd3c18b2011-05-31 14:37:23 -07001191 "ep %#x - rounding interval to %d %sframes\n",
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001192 ep->desc.bEndpointAddress,
Dmitry Torokhovcd3c18b2011-05-31 14:37:23 -07001193 1 << interval,
1194 udev->speed == USB_SPEED_FULL ? "" : "micro");
1195
1196 if (udev->speed == USB_SPEED_FULL) {
1197 /*
1198 * Full speed isoc endpoints specify interval in frames,
1199 * not microframes. We are using microframes everywhere,
1200 * so adjust accordingly.
1201 */
1202 interval += 3; /* 1 frame = 2^3 uframes */
1203 }
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001204
1205 return interval;
1206}
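
/*
 * Example of the conversion above (illustrative): a high speed interrupt
 * endpoint with bInterval 4 yields exponent 3, i.e. service every
 * 2^3 * 125us = 1ms; a full speed isoc endpoint with the same bInterval
 * gets 3 added for the frame-to-microframe shift, giving 2^6 * 125us = 8ms.
 */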
1207
1208/*
Sarah Sharp340a3502012-02-13 14:42:11 -08001209 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001210 * microframes, rounded down to nearest power of 2.
1211 */
Sarah Sharp340a3502012-02-13 14:42:11 -08001212static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1213 struct usb_host_endpoint *ep, unsigned int desc_interval,
1214 unsigned int min_exponent, unsigned int max_exponent)
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001215{
1216 unsigned int interval;
1217
Sarah Sharp340a3502012-02-13 14:42:11 -08001218 interval = fls(desc_interval) - 1;
1219 interval = clamp_val(interval, min_exponent, max_exponent);
1220 if ((1 << interval) != desc_interval)
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001221 dev_warn(&udev->dev,
1222 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1223 ep->desc.bEndpointAddress,
1224 1 << interval,
Sarah Sharp340a3502012-02-13 14:42:11 -08001225 desc_interval);
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001226
1227 return interval;
1228}
1229
Sarah Sharp340a3502012-02-13 14:42:11 -08001230static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1231 struct usb_host_endpoint *ep)
1232{
Sarah Sharp55c19452012-12-17 14:12:35 -08001233 if (ep->desc.bInterval == 0)
1234 return 0;
Sarah Sharp340a3502012-02-13 14:42:11 -08001235 return xhci_microframes_to_exponent(udev, ep,
1236 ep->desc.bInterval, 0, 15);
1237}
1238
1239
1240static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1241 struct usb_host_endpoint *ep)
1242{
1243 return xhci_microframes_to_exponent(udev, ep,
1244 ep->desc.bInterval * 8, 3, 10);
1245}
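
/*
 * Example of the rounding above (illustrative): a low or full speed
 * interrupt endpoint with bInterval 10 (10 frames = 80 microframes) is
 * rounded down to 2^6 = 64 microframes, i.e. 8ms, and a warning is
 * printed because 64 != 80.
 */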
1246
Sarah Sharpf94e01862009-04-27 19:58:38 -07001247/* Return the polling or NAK interval.
1248 *
1249 * The polling interval is expressed in "microframes". If xHCI's Interval field
1250 * is set to N, it will service the endpoint every 2^(Interval)*125us.
1251 *
1252 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
1253 * is set to 0.
1254 */
Dmitry Torokhov575688e2011-03-20 02:15:16 -07001255static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
Sarah Sharpf94e01862009-04-27 19:58:38 -07001256 struct usb_host_endpoint *ep)
1257{
1258 unsigned int interval = 0;
1259
1260 switch (udev->speed) {
1261 case USB_SPEED_HIGH:
1262 /* Max NAK rate */
1263 if (usb_endpoint_xfer_control(&ep->desc) ||
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001264 usb_endpoint_xfer_bulk(&ep->desc)) {
Sarah Sharp340a3502012-02-13 14:42:11 -08001265 interval = xhci_parse_microframe_interval(udev, ep);
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001266 break;
1267 }
Sarah Sharpf94e01862009-04-27 19:58:38 -07001268 /* Fall through - SS and HS isoc/int have same decoding */
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001269
Sarah Sharpf94e01862009-04-27 19:58:38 -07001270 case USB_SPEED_SUPER:
1271 if (usb_endpoint_xfer_int(&ep->desc) ||
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001272 usb_endpoint_xfer_isoc(&ep->desc)) {
1273 interval = xhci_parse_exponent_interval(udev, ep);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001274 }
1275 break;
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001276
Sarah Sharpf94e01862009-04-27 19:58:38 -07001277 case USB_SPEED_FULL:
Sarah Sharpb513d442011-05-13 13:10:01 -07001278 if (usb_endpoint_xfer_isoc(&ep->desc)) {
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001279 interval = xhci_parse_exponent_interval(udev, ep);
1280 break;
1281 }
1282 /*
Sarah Sharpb513d442011-05-13 13:10:01 -07001283 * Fall through for interrupt endpoint interval decoding
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001284 * since it uses the same rules as low speed interrupt
1285 * endpoints.
1286 */
1287
Sarah Sharpf94e01862009-04-27 19:58:38 -07001288 case USB_SPEED_LOW:
1289 if (usb_endpoint_xfer_int(&ep->desc) ||
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001290 usb_endpoint_xfer_isoc(&ep->desc)) {
1291
1292 interval = xhci_parse_frame_interval(udev, ep);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001293 }
1294 break;
Dmitry Torokhovdfa49c42011-03-23 22:41:23 -07001295
Sarah Sharpf94e01862009-04-27 19:58:38 -07001296 default:
1297 BUG();
1298 }
1299 return EP_INTERVAL(interval);
1300}
1301
Sarah Sharpc30c7912010-07-10 15:48:01 +02001302/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
Sarah Sharp1cf62242010-04-16 08:07:04 -07001303 * High speed endpoint descriptors can define "the number of additional
1304 * transaction opportunities per microframe", but that goes in the Max Burst
1305 * endpoint context field.
1306 */
Dmitry Torokhov575688e2011-03-20 02:15:16 -07001307static u32 xhci_get_endpoint_mult(struct usb_device *udev,
Sarah Sharp1cf62242010-04-16 08:07:04 -07001308 struct usb_host_endpoint *ep)
1309{
Sarah Sharpc30c7912010-07-10 15:48:01 +02001310 if (udev->speed != USB_SPEED_SUPER ||
1311 !usb_endpoint_xfer_isoc(&ep->desc))
Sarah Sharp1cf62242010-04-16 08:07:04 -07001312 return 0;
Alan Stern842f1692010-04-30 12:44:46 -04001313 return ep->ss_ep_comp.bmAttributes;
Sarah Sharp1cf62242010-04-16 08:07:04 -07001314}
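/*
 * Illustrative note (not part of the original driver): for a SuperSpeed
 * isochronous endpoint the companion descriptor's bmAttributes carries the
 * Mult value in its low-order bits, and the remaining bits are assumed to be
 * reserved (zero), so returning the raw byte here is treated as returning
 * Mult.  The value is folded into the endpoint context via EP_MULT() in
 * xhci_endpoint_init() below.
 */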
1315
Dmitry Torokhov575688e2011-03-20 02:15:16 -07001316static u32 xhci_get_endpoint_type(struct usb_device *udev,
Sarah Sharpf94e01862009-04-27 19:58:38 -07001317 struct usb_host_endpoint *ep)
1318{
1319 int in;
1320 u32 type;
1321
1322 in = usb_endpoint_dir_in(&ep->desc);
1323 if (usb_endpoint_xfer_control(&ep->desc)) {
1324 type = EP_TYPE(CTRL_EP);
1325 } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
1326 if (in)
1327 type = EP_TYPE(BULK_IN_EP);
1328 else
1329 type = EP_TYPE(BULK_OUT_EP);
1330 } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
1331 if (in)
1332 type = EP_TYPE(ISOC_IN_EP);
1333 else
1334 type = EP_TYPE(ISOC_OUT_EP);
1335 } else if (usb_endpoint_xfer_int(&ep->desc)) {
1336 if (in)
1337 type = EP_TYPE(INT_IN_EP);
1338 else
1339 type = EP_TYPE(INT_OUT_EP);
1340 } else {
1341 BUG();
1342 }
1343 return type;
1344}
1345
Sarah Sharp9238f252010-04-16 08:07:27 -07001346/* Return the maximum endpoint service interval time (ESIT) payload.
1347 * Basically, this is the max packet size multiplied by the burst size;
1348 * SuperSpeed endpoints report it directly via wBytesPerInterval.
1349 */
Dmitry Torokhov575688e2011-03-20 02:15:16 -07001350static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
Sarah Sharp9238f252010-04-16 08:07:27 -07001351 struct usb_device *udev,
1352 struct usb_host_endpoint *ep)
1353{
1354 int max_burst;
1355 int max_packet;
1356
1357 /* Only applies for interrupt or isochronous endpoints */
1358 if (usb_endpoint_xfer_control(&ep->desc) ||
1359 usb_endpoint_xfer_bulk(&ep->desc))
1360 return 0;
1361
Alan Stern842f1692010-04-30 12:44:46 -04001362 if (udev->speed == USB_SPEED_SUPER)
Sebastian Andrzej Siewior64b3c302011-04-11 20:19:12 +02001363 return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
Sarah Sharp9238f252010-04-16 08:07:27 -07001364
Kuninori Morimoto29cc8892011-08-23 03:12:03 -07001365 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
1366 max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
Sarah Sharp9238f252010-04-16 08:07:27 -07001367 /* A 0 in max burst means 1 transfer per ESIT */
1368 return max_packet * (max_burst + 1);
1369}
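/*
 * Illustrative worked example (not part of the original driver), assuming
 * GET_MAX_PACKET() masks off the low 11 bits of wMaxPacketSize: a high-speed
 * isochronous endpoint with wMaxPacketSize = 0x1400 decodes to
 * max_packet = 0x1400 & 0x7ff = 1024 and max_burst = (0x1400 & 0x1800) >> 11
 * = 2, so the max ESIT payload is 1024 * (2 + 1) = 3072 bytes per service
 * interval.  A SuperSpeed endpoint skips the math and reports the value
 * directly in the companion descriptor's wBytesPerInterval.
 */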
1370
Sarah Sharp8df75f42010-04-02 15:34:16 -07001371/* Set up an endpoint with one ring segment. Do not allocate stream rings.
1372 * Drivers will have to call usb_alloc_streams() to do that.
1373 */
Sarah Sharpf94e01862009-04-27 19:58:38 -07001374int xhci_endpoint_init(struct xhci_hcd *xhci,
1375 struct xhci_virt_device *virt_dev,
1376 struct usb_device *udev,
Sarah Sharpf88ba782009-05-14 11:44:22 -07001377 struct usb_host_endpoint *ep,
1378 gfp_t mem_flags)
Sarah Sharpf94e01862009-04-27 19:58:38 -07001379{
1380 unsigned int ep_index;
1381 struct xhci_ep_ctx *ep_ctx;
1382 struct xhci_ring *ep_ring;
1383 unsigned int max_packet;
1384 unsigned int max_burst;
Andiry Xu3b72fca2012-03-05 17:49:32 +08001385 enum xhci_ring_type type;
Sarah Sharp9238f252010-04-16 08:07:27 -07001386 u32 max_esit_payload;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001387
1388 ep_index = xhci_get_endpoint_index(&ep->desc);
John Yound115b042009-07-27 12:05:15 -07001389 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001390
Andiry Xu3b72fca2012-03-05 17:49:32 +08001391 type = usb_endpoint_type(&ep->desc);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001392 /* Set up the endpoint ring */
Andiry Xu8dfec612012-03-05 17:49:37 +08001393 virt_dev->eps[ep_index].new_ring =
Andiry Xu2fdcd472012-03-05 17:49:39 +08001394 xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
Sarah Sharp74f9fe22009-12-03 09:44:29 -08001395 if (!virt_dev->eps[ep_index].new_ring) {
1396 /* Attempt to use the ring cache */
1397 if (virt_dev->num_rings_cached == 0)
1398 return -ENOMEM;
1399 virt_dev->eps[ep_index].new_ring =
1400 virt_dev->ring_cache[virt_dev->num_rings_cached];
1401 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
1402 virt_dev->num_rings_cached--;
Andiry Xu7e393a82011-09-23 14:19:54 -07001403 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
Andiry Xu186a7ef2012-03-05 17:49:36 +08001404 1, type);
Sarah Sharp74f9fe22009-12-03 09:44:29 -08001405 }
Andiry Xud18240d2010-07-22 15:23:25 -07001406 virt_dev->eps[ep_index].skip = false;
Sarah Sharp63a0d9a2009-09-04 10:53:09 -07001407 ep_ring = virt_dev->eps[ep_index].new_ring;
Matt Evans28ccd292011-03-29 13:40:46 +11001408 ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001409
Matt Evans28ccd292011-03-29 13:40:46 +11001410 ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
1411 | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001412
1413 /* FIXME dig Mult and streams info out of ep companion desc */
1414
Sarah Sharp47692d12009-07-27 12:04:27 -07001415 /* Allow 3 retries for everything but isoc;
Andiry Xu7b1fc2e2011-05-05 18:14:00 +08001416 * CErr shall be set to 0 for Isoch endpoints.
Sarah Sharp47692d12009-07-27 12:04:27 -07001417 */
Sarah Sharpf94e01862009-04-27 19:58:38 -07001418 if (!usb_endpoint_xfer_isoc(&ep->desc))
Matt Evans28ccd292011-03-29 13:40:46 +11001419 ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001420 else
Andiry Xu7b1fc2e2011-05-05 18:14:00 +08001421 ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001422
Matt Evans28ccd292011-03-29 13:40:46 +11001423 ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001424
1425 /* Set the max packet size and max burst */
1426 switch (udev->speed) {
1427 case USB_SPEED_SUPER:
Kuninori Morimoto29cc8892011-08-23 03:12:03 -07001428 max_packet = usb_endpoint_maxp(&ep->desc);
Matt Evans28ccd292011-03-29 13:40:46 +11001429 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
Sarah Sharpb10de142009-04-27 19:58:50 -07001430 /* dig out max burst from ep companion desc */
Alan Stern842f1692010-04-30 12:44:46 -04001431 max_packet = ep->ss_ep_comp.bMaxBurst;
Matt Evans28ccd292011-03-29 13:40:46 +11001432 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001433 break;
1434 case USB_SPEED_HIGH:
1435 /* Bits 12:11 of wMaxPacketSize specify the number of additional transaction
1436 * opportunities per microframe (USB 2.0, section 9.6.6)
1437 */
1438 if (usb_endpoint_xfer_isoc(&ep->desc) ||
1439 usb_endpoint_xfer_int(&ep->desc)) {
Kuninori Morimoto29cc8892011-08-23 03:12:03 -07001440 max_burst = (usb_endpoint_maxp(&ep->desc)
Matt Evans28ccd292011-03-29 13:40:46 +11001441 & 0x1800) >> 11;
1442 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001443 }
1444 /* Fall through */
1445 case USB_SPEED_FULL:
1446 case USB_SPEED_LOW:
Kuninori Morimoto29cc8892011-08-23 03:12:03 -07001447 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
Matt Evans28ccd292011-03-29 13:40:46 +11001448 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
Sarah Sharpf94e01862009-04-27 19:58:38 -07001449 break;
1450 default:
1451 BUG();
1452 }
Sarah Sharp9238f252010-04-16 08:07:27 -07001453 max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
Matt Evans28ccd292011-03-29 13:40:46 +11001454 ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
Sarah Sharp9238f252010-04-16 08:07:27 -07001455
1456 /*
1457 * XXX no idea how to calculate the average TRB buffer length for bulk
1458 * endpoints, as the driver gives us no clue how big each scatter gather
1459 * list entry (or buffer) is going to be.
1460 *
1461 * For isochronous and interrupt endpoints, we set it to the max
1462 * available, until we have new API in the USB core to allow drivers to
1463 * declare how much bandwidth they actually need.
1464 *
1465 * Normally, it would be calculated by taking the total of the buffer
1466 * lengths in the TD and then dividing by the number of TRBs in a TD,
1467 * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
1468 * use Event Data TRBs, and we don't chain in a link TRB on short
1469 * transfers, we're basically dividing by 1.
Andiry Xu51eb01a2011-05-05 18:13:58 +08001470 *
1471 * xHCI 1.0 specification indicates that the Average TRB Length should
1472 * be set to 8 for control endpoints.
Sarah Sharp9238f252010-04-16 08:07:27 -07001473 */
Andiry Xu51eb01a2011-05-05 18:13:58 +08001474 if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
1475 ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
1476 else
1477 ep_ctx->tx_info |=
1478 cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
Sarah Sharp9238f252010-04-16 08:07:27 -07001479
Sarah Sharpf94e01862009-04-27 19:58:38 -07001480 /* FIXME Debug endpoint context */
1481 return 0;
1482}
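/*
 * Illustrative note (not part of the original driver): continuing the
 * example above, a periodic endpoint with a 3072-byte max ESIT payload ends
 * up with both MAX_ESIT_PAYLOAD_FOR_EP(3072) and AVG_TRB_LENGTH_FOR_EP(3072)
 * in tx_info, while a control endpoint on an xHCI 1.0 host gets an average
 * TRB length of 8.  Note also the fallback path above: if a fresh ring
 * allocation fails, a previously freed ring is reused from
 * virt_dev->ring_cache[] after being reinitialized.
 */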
1483
1484void xhci_endpoint_zero(struct xhci_hcd *xhci,
1485 struct xhci_virt_device *virt_dev,
1486 struct usb_host_endpoint *ep)
1487{
1488 unsigned int ep_index;
1489 struct xhci_ep_ctx *ep_ctx;
1490
1491 ep_index = xhci_get_endpoint_index(&ep->desc);
John Yound115b042009-07-27 12:05:15 -07001492 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
Sarah Sharpf94e01862009-04-27 19:58:38 -07001493
1494 ep_ctx->ep_info = 0;
1495 ep_ctx->ep_info2 = 0;
Sarah Sharp8e595a52009-07-27 12:03:31 -07001496 ep_ctx->deq = 0;
Sarah Sharpf94e01862009-04-27 19:58:38 -07001497 ep_ctx->tx_info = 0;
1498 /* Don't free the endpoint ring until the set interface or configuration
1499 * request succeeds.
1500 */
1501}
1502
Sarah Sharp9af5d712011-09-02 11:05:48 -07001503void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
1504{
1505 bw_info->ep_interval = 0;
1506 bw_info->mult = 0;
1507 bw_info->num_packets = 0;
1508 bw_info->max_packet_size = 0;
1509 bw_info->type = 0;
1510 bw_info->max_esit_payload = 0;
1511}
1512
1513void xhci_update_bw_info(struct xhci_hcd *xhci,
1514 struct xhci_container_ctx *in_ctx,
1515 struct xhci_input_control_ctx *ctrl_ctx,
1516 struct xhci_virt_device *virt_dev)
1517{
1518 struct xhci_bw_info *bw_info;
1519 struct xhci_ep_ctx *ep_ctx;
1520 unsigned int ep_type;
1521 int i;
1522
1523 for (i = 1; i < 31; ++i) {
1524 bw_info = &virt_dev->eps[i].bw_info;
1525
1526 /* We can't tell what endpoint type is being dropped, but
1527 * unconditionally clearing the bandwidth info for non-periodic
1528 * endpoints should be harmless because the info will never be
1529 * set in the first place.
1530 */
1531 if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1532 /* Dropped endpoint */
1533 xhci_clear_endpoint_bw_info(bw_info);
1534 continue;
1535 }
1536
1537 if (EP_IS_ADDED(ctrl_ctx, i)) {
1538 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1539 ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1540
1541 /* Ignore non-periodic endpoints */
1542 if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1543 ep_type != ISOC_IN_EP &&
1544 ep_type != INT_IN_EP)
1545 continue;
1546
1547 /* Added or changed endpoint */
1548 bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1549 le32_to_cpu(ep_ctx->ep_info));
Sarah Sharp170c0262011-09-13 16:41:12 -07001550 /* Number of packets and mult are zero-based in the
1551 * input context, but we want one-based for the
1552 * interval table.
Sarah Sharp9af5d712011-09-02 11:05:48 -07001553 */
Sarah Sharp170c0262011-09-13 16:41:12 -07001554 bw_info->mult = CTX_TO_EP_MULT(
1555 le32_to_cpu(ep_ctx->ep_info)) + 1;
Sarah Sharp9af5d712011-09-02 11:05:48 -07001556 bw_info->num_packets = CTX_TO_MAX_BURST(
1557 le32_to_cpu(ep_ctx->ep_info2)) + 1;
1558 bw_info->max_packet_size = MAX_PACKET_DECODED(
1559 le32_to_cpu(ep_ctx->ep_info2));
1560 bw_info->type = ep_type;
1561 bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1562 le32_to_cpu(ep_ctx->tx_info));
1563 }
1564 }
1565}
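/*
 * Illustrative worked example (not part of the original driver): for an
 * added interrupt endpoint whose input context encodes Mult = 1 and
 * Max Burst = 2 (both zero-based), the loop above stores mult = 2 and
 * num_packets = 3 (one-based), copies the interval and endpoint type as-is,
 * and decodes max_packet_size and max_esit_payload from ep_info2 and tx_info.
 */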
1566
Sarah Sharpf2217e82009-08-07 14:04:43 -07001567/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
1568 * Useful when you want to change one particular aspect of the endpoint and then
1569 * issue a configure endpoint command.
1570 */
1571void xhci_endpoint_copy(struct xhci_hcd *xhci,
Sarah Sharp913a8a32009-09-04 10:53:13 -07001572 struct xhci_container_ctx *in_ctx,
1573 struct xhci_container_ctx *out_ctx,
1574 unsigned int ep_index)
Sarah Sharpf2217e82009-08-07 14:04:43 -07001575{
1576 struct xhci_ep_ctx *out_ep_ctx;
1577 struct xhci_ep_ctx *in_ep_ctx;
1578
Sarah Sharp913a8a32009-09-04 10:53:13 -07001579 out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1580 in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001581
1582 in_ep_ctx->ep_info = out_ep_ctx->ep_info;
1583 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1584 in_ep_ctx->deq = out_ep_ctx->deq;
1585 in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1586}
1587
1588/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
1589 * Useful when you want to change one particular aspect of the endpoint and then
1590 * issue a configure endpoint command. Only the context entries field matters,
1591 * but we'll copy the whole thing anyway.
1592 */
Sarah Sharp913a8a32009-09-04 10:53:13 -07001593void xhci_slot_copy(struct xhci_hcd *xhci,
1594 struct xhci_container_ctx *in_ctx,
1595 struct xhci_container_ctx *out_ctx)
Sarah Sharpf2217e82009-08-07 14:04:43 -07001596{
1597 struct xhci_slot_ctx *in_slot_ctx;
1598 struct xhci_slot_ctx *out_slot_ctx;
1599
Sarah Sharp913a8a32009-09-04 10:53:13 -07001600 in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1601 out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
Sarah Sharpf2217e82009-08-07 14:04:43 -07001602
1603 in_slot_ctx->dev_info = out_slot_ctx->dev_info;
1604 in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
1605 in_slot_ctx->tt_info = out_slot_ctx->tt_info;
1606 in_slot_ctx->dev_state = out_slot_ctx->dev_state;
1607}
1608
John Youn254c80a2009-07-27 12:05:03 -07001609/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
1610static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1611{
1612 int i;
1613 struct device *dev = xhci_to_hcd(xhci)->self.controller;
1614 int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1615
1616 xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
1617
1618 if (!num_sp)
1619 return 0;
1620
1621 xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
1622 if (!xhci->scratchpad)
1623 goto fail_sp;
1624
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001625 xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
John Youn254c80a2009-07-27 12:05:03 -07001626 num_sp * sizeof(u64),
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001627 &xhci->scratchpad->sp_dma, flags);
John Youn254c80a2009-07-27 12:05:03 -07001628 if (!xhci->scratchpad->sp_array)
1629 goto fail_sp2;
1630
1631 xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
1632 if (!xhci->scratchpad->sp_buffers)
1633 goto fail_sp3;
1634
1635 xhci->scratchpad->sp_dma_buffers =
1636 kzalloc(sizeof(dma_addr_t) * num_sp, flags);
1637
1638 if (!xhci->scratchpad->sp_dma_buffers)
1639 goto fail_sp4;
1640
Matt Evans28ccd292011-03-29 13:40:46 +11001641 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
John Youn254c80a2009-07-27 12:05:03 -07001642 for (i = 0; i < num_sp; i++) {
1643 dma_addr_t dma;
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001644 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1645 flags);
John Youn254c80a2009-07-27 12:05:03 -07001646 if (!buf)
1647 goto fail_sp5;
1648
1649 xhci->scratchpad->sp_array[i] = dma;
1650 xhci->scratchpad->sp_buffers[i] = buf;
1651 xhci->scratchpad->sp_dma_buffers[i] = dma;
1652 }
1653
1654 return 0;
1655
1656 fail_sp5:
1657 for (i = i - 1; i >= 0; i--) {
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001658 dma_free_coherent(dev, xhci->page_size,
John Youn254c80a2009-07-27 12:05:03 -07001659 xhci->scratchpad->sp_buffers[i],
1660 xhci->scratchpad->sp_dma_buffers[i]);
1661 }
1662 kfree(xhci->scratchpad->sp_dma_buffers);
1663
1664 fail_sp4:
1665 kfree(xhci->scratchpad->sp_buffers);
1666
1667 fail_sp3:
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001668 dma_free_coherent(dev, num_sp * sizeof(u64),
John Youn254c80a2009-07-27 12:05:03 -07001669 xhci->scratchpad->sp_array,
1670 xhci->scratchpad->sp_dma);
1671
1672 fail_sp2:
1673 kfree(xhci->scratchpad);
1674 xhci->scratchpad = NULL;
1675
1676 fail_sp:
1677 return -ENOMEM;
1678}
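/*
 * Illustrative worked example (not part of the original driver): with
 * HCS_MAX_SCRATCHPAD reporting 4 buffers and a 4K page size, sp_array is a
 * 4 * 8 = 32-byte DMA array whose address is written into
 * dev_context_ptrs[0]; each of its entries holds the DMA address of a
 * separate 4K page that the controller may use as private scratch memory.
 */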
1679
1680static void scratchpad_free(struct xhci_hcd *xhci)
1681{
1682 int num_sp;
1683 int i;
1684 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
1685
1686 if (!xhci->scratchpad)
1687 return;
1688
1689 num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1690
1691 for (i = 0; i < num_sp; i++) {
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001692 dma_free_coherent(&pdev->dev, xhci->page_size,
John Youn254c80a2009-07-27 12:05:03 -07001693 xhci->scratchpad->sp_buffers[i],
1694 xhci->scratchpad->sp_dma_buffers[i]);
1695 }
1696 kfree(xhci->scratchpad->sp_dma_buffers);
1697 kfree(xhci->scratchpad->sp_buffers);
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001698 dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
John Youn254c80a2009-07-27 12:05:03 -07001699 xhci->scratchpad->sp_array,
1700 xhci->scratchpad->sp_dma);
1701 kfree(xhci->scratchpad);
1702 xhci->scratchpad = NULL;
1703}
1704
Sarah Sharp913a8a32009-09-04 10:53:13 -07001705struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
Sarah Sharpa1d78c12009-12-09 15:59:03 -08001706 bool allocate_in_ctx, bool allocate_completion,
1707 gfp_t mem_flags)
Sarah Sharp913a8a32009-09-04 10:53:13 -07001708{
1709 struct xhci_command *command;
1710
1711 command = kzalloc(sizeof(*command), mem_flags);
1712 if (!command)
1713 return NULL;
1714
Sarah Sharpa1d78c12009-12-09 15:59:03 -08001715 if (allocate_in_ctx) {
1716 command->in_ctx =
1717 xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
1718 mem_flags);
1719 if (!command->in_ctx) {
1720 kfree(command);
1721 return NULL;
1722 }
Julia Lawall06e18292009-11-21 12:51:47 +01001723 }
Sarah Sharp913a8a32009-09-04 10:53:13 -07001724
1725 if (allocate_completion) {
1726 command->completion =
1727 kzalloc(sizeof(struct completion), mem_flags);
1728 if (!command->completion) {
1729 xhci_free_container_ctx(xhci, command->in_ctx);
Julia Lawall06e18292009-11-21 12:51:47 +01001730 kfree(command);
Sarah Sharp913a8a32009-09-04 10:53:13 -07001731 return NULL;
1732 }
1733 init_completion(command->completion);
1734 }
1735
1736 command->status = 0;
1737 INIT_LIST_HEAD(&command->cmd_list);
1738 return command;
1739}
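/*
 * Illustrative usage sketch (not part of the original driver), assuming a
 * caller that wants both an input context and a completion to wait on:
 *
 *	cmd = xhci_alloc_command(xhci, true, true, GFP_KERNEL);
 *	if (!cmd)
 *		return -ENOMEM;
 *	... queue the command and wait on cmd->completion ...
 *	xhci_free_command(xhci, cmd);
 *
 * xhci_mem_init() below allocates xhci->lpm_command exactly this way.
 */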
1740
Andiry Xu8e51adc2010-07-22 15:23:31 -07001741void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
1742{
Andiry Xu2ffdea22011-09-02 11:05:57 -07001743 if (urb_priv) {
1744 kfree(urb_priv->td[0]);
1745 kfree(urb_priv);
Andiry Xu8e51adc2010-07-22 15:23:31 -07001746 }
Andiry Xu8e51adc2010-07-22 15:23:31 -07001747}
1748
Sarah Sharp913a8a32009-09-04 10:53:13 -07001749void xhci_free_command(struct xhci_hcd *xhci,
1750 struct xhci_command *command)
1751{
1752 xhci_free_container_ctx(xhci,
1753 command->in_ctx);
1754 kfree(command->completion);
1755 kfree(command);
1756}
1757
Sarah Sharp66d4ead2009-04-27 19:52:28 -07001758void xhci_mem_cleanup(struct xhci_hcd *xhci)
1759{
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001760 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
Andiry Xu95743232011-09-23 14:19:51 -07001761 struct dev_info *dev_info, *next;
Elric Fub92cc662012-06-27 16:31:12 +08001762 struct xhci_cd *cur_cd, *next_cd;
Andiry Xu95743232011-09-23 14:19:51 -07001763 unsigned long flags;
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001764 int size;
Takashi Iwai32f1d2c2012-06-01 10:06:24 +02001765 int i, j, num_ports;
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001766
1767 /* Free the Event Ring Segment Table and the actual Event Ring */
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001768 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
1769 if (xhci->erst.entries)
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001770 dma_free_coherent(&pdev->dev, size,
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001771 xhci->erst.entries, xhci->erst.erst_dma_addr);
1772 xhci->erst.entries = NULL;
1773 xhci_dbg(xhci, "Freed ERST\n");
1774 if (xhci->event_ring)
1775 xhci_ring_free(xhci, xhci->event_ring);
1776 xhci->event_ring = NULL;
1777 xhci_dbg(xhci, "Freed event ring\n");
1778
Sarah Sharpdbc33302012-05-08 07:32:03 -07001779 if (xhci->lpm_command)
1780 xhci_free_command(xhci, xhci->lpm_command);
Sarah Sharp33b28312012-05-08 07:09:26 -07001781 xhci->cmd_ring_reserved_trbs = 0;
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001782 if (xhci->cmd_ring)
1783 xhci_ring_free(xhci, xhci->cmd_ring);
1784 xhci->cmd_ring = NULL;
1785 xhci_dbg(xhci, "Freed command ring\n");
Elric Fub92cc662012-06-27 16:31:12 +08001786 list_for_each_entry_safe(cur_cd, next_cd,
1787 &xhci->cancel_cmd_list, cancel_cmd_list) {
1788 list_del(&cur_cd->cancel_cmd_list);
1789 kfree(cur_cd);
1790 }
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001791
1792 for (i = 1; i < MAX_HC_SLOTS; ++i)
1793 xhci_free_virt_device(xhci, i);
1794
Sarah Sharp0ebbab32009-04-27 19:52:34 -07001795 if (xhci->segment_pool)
1796 dma_pool_destroy(xhci->segment_pool);
1797 xhci->segment_pool = NULL;
1798 xhci_dbg(xhci, "Freed segment pool\n");
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001799
1800 if (xhci->device_pool)
1801 dma_pool_destroy(xhci->device_pool);
1802 xhci->device_pool = NULL;
1803 xhci_dbg(xhci, "Freed device context pool\n");
1804
Sarah Sharp8df75f42010-04-02 15:34:16 -07001805 if (xhci->small_streams_pool)
1806 dma_pool_destroy(xhci->small_streams_pool);
1807 xhci->small_streams_pool = NULL;
1808 xhci_dbg(xhci, "Freed small stream array pool\n");
1809
1810 if (xhci->medium_streams_pool)
1811 dma_pool_destroy(xhci->medium_streams_pool);
1812 xhci->medium_streams_pool = NULL;
1813 xhci_dbg(xhci, "Freed medium stream array pool\n");
1814
Sarah Sharpa74588f2009-04-27 19:53:42 -07001815 if (xhci->dcbaa)
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07001816 dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
Sarah Sharpa74588f2009-04-27 19:53:42 -07001817 xhci->dcbaa, xhci->dcbaa->dma);
1818 xhci->dcbaa = NULL;
Sarah Sharp3ffbba92009-04-27 19:57:38 -07001819
Sarah Sharp5294bea2009-11-04 11:22:19 -08001820 scratchpad_free(xhci);
Sarah Sharpda6699c2010-10-26 16:47:13 -07001821
Andiry Xu95743232011-09-23 14:19:51 -07001822 spin_lock_irqsave(&xhci->lock, flags);
1823 list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
1824 list_del(&dev_info->list);
1825 kfree(dev_info);
1826 }
1827 spin_unlock_irqrestore(&xhci->lock, flags);
1828
Takashi Iwai32f1d2c2012-06-01 10:06:24 +02001829 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1830 for (i = 0; i < num_ports; i++) {
1831 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1832 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1833 struct list_head *ep = &bwt->interval_bw[j].endpoints;
1834 while (!list_empty(ep))
1835 list_del_init(ep->next);
Oliver Neukumf8a9e722012-05-10 10:19:21 +02001836 }
1837 }
1838
Takashi Iwai32f1d2c2012-06-01 10:06:24 +02001839 for (i = 0; i < num_ports; i++) {
1840 struct xhci_tt_bw_info *tt, *n;
1841 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1842 list_del(&tt->tt_list);
1843 kfree(tt);
1844 }
Oliver Neukumf8a9e722012-05-10 10:19:21 +02001845 }
1846
Sarah Sharpda6699c2010-10-26 16:47:13 -07001847 xhci->num_usb2_ports = 0;
1848 xhci->num_usb3_ports = 0;
Oliver Neukumf8a9e722012-05-10 10:19:21 +02001849 xhci->num_active_eps = 0;
Sarah Sharpda6699c2010-10-26 16:47:13 -07001850 kfree(xhci->usb2_ports);
1851 kfree(xhci->usb3_ports);
1852 kfree(xhci->port_array);
Sarah Sharp839c8172011-09-02 11:05:47 -07001853 kfree(xhci->rh_bw);
Sarah Sharpda6699c2010-10-26 16:47:13 -07001854
Sarah Sharp66d4ead2009-04-27 19:52:28 -07001855 xhci->page_size = 0;
1856 xhci->page_shift = 0;
Sarah Sharp20b67cf2010-12-15 12:47:14 -08001857 xhci->bus_state[0].bus_suspended = 0;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08001858 xhci->bus_state[1].bus_suspended = 0;
Sarah Sharp66d4ead2009-04-27 19:52:28 -07001859}
1860
Sarah Sharp6648f292009-11-09 13:35:23 -08001861static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
1862 struct xhci_segment *input_seg,
1863 union xhci_trb *start_trb,
1864 union xhci_trb *end_trb,
1865 dma_addr_t input_dma,
1866 struct xhci_segment *result_seg,
1867 char *test_name, int test_number)
1868{
1869 unsigned long long start_dma;
1870 unsigned long long end_dma;
1871 struct xhci_segment *seg;
1872
1873 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
1874 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
1875
1876 seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
1877 if (seg != result_seg) {
1878 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
1879 test_name, test_number);
1880 xhci_warn(xhci, "Tested TRB math w/ seg %p and "
1881 "input DMA 0x%llx\n",
1882 input_seg,
1883 (unsigned long long) input_dma);
1884 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
1885 "ending TRB %p (0x%llx DMA)\n",
1886 start_trb, start_dma,
1887 end_trb, end_dma);
1888 xhci_warn(xhci, "Expected seg %p, got seg %p\n",
1889 result_seg, seg);
1890 return -1;
1891 }
1892 return 0;
1893}
1894
1895/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
1896static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
1897{
1898 struct {
1899 dma_addr_t input_dma;
1900 struct xhci_segment *result_seg;
1901 } simple_test_vector [] = {
1902 /* A zeroed DMA field should fail */
1903 { 0, NULL },
1904 /* One TRB before the ring start should fail */
1905 { xhci->event_ring->first_seg->dma - 16, NULL },
1906 /* One byte before the ring start should fail */
1907 { xhci->event_ring->first_seg->dma - 1, NULL },
1908 /* Starting TRB should succeed */
1909 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1910 /* Ending TRB should succeed */
1911 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1912 xhci->event_ring->first_seg },
1913 /* One byte after the ring end should fail */
1914 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1915 /* One TRB after the ring end should fail */
1916 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1917 /* An address of all ones should fail */
1918 { (dma_addr_t) (~0), NULL },
1919 };
1920 struct {
1921 struct xhci_segment *input_seg;
1922 union xhci_trb *start_trb;
1923 union xhci_trb *end_trb;
1924 dma_addr_t input_dma;
1925 struct xhci_segment *result_seg;
1926 } complex_test_vector [] = {
1927 /* Test feeding a valid DMA address from a different ring */
1928 { .input_seg = xhci->event_ring->first_seg,
1929 .start_trb = xhci->event_ring->first_seg->trbs,
1930 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1931 .input_dma = xhci->cmd_ring->first_seg->dma,
1932 .result_seg = NULL,
1933 },
1934 /* Test feeding a valid end TRB from a different ring */
1935 { .input_seg = xhci->event_ring->first_seg,
1936 .start_trb = xhci->event_ring->first_seg->trbs,
1937 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1938 .input_dma = xhci->cmd_ring->first_seg->dma,
1939 .result_seg = NULL,
1940 },
1941 /* Test feeding a valid start and end TRB from a different ring */
1942 { .input_seg = xhci->event_ring->first_seg,
1943 .start_trb = xhci->cmd_ring->first_seg->trbs,
1944 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1945 .input_dma = xhci->cmd_ring->first_seg->dma,
1946 .result_seg = NULL,
1947 },
1948 /* TRB in this ring, but after this TD */
1949 { .input_seg = xhci->event_ring->first_seg,
1950 .start_trb = &xhci->event_ring->first_seg->trbs[0],
1951 .end_trb = &xhci->event_ring->first_seg->trbs[3],
1952 .input_dma = xhci->event_ring->first_seg->dma + 4*16,
1953 .result_seg = NULL,
1954 },
1955 /* TRB in this ring, but before this TD */
1956 { .input_seg = xhci->event_ring->first_seg,
1957 .start_trb = &xhci->event_ring->first_seg->trbs[3],
1958 .end_trb = &xhci->event_ring->first_seg->trbs[6],
1959 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1960 .result_seg = NULL,
1961 },
1962 /* TRB in this ring, but after this wrapped TD */
1963 { .input_seg = xhci->event_ring->first_seg,
1964 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1965 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1966 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1967 .result_seg = NULL,
1968 },
1969 /* TRB in this ring, but before this wrapped TD */
1970 { .input_seg = xhci->event_ring->first_seg,
1971 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1972 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1973 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
1974 .result_seg = NULL,
1975 },
1976 /* TRB not in this ring, and we have a wrapped TD */
1977 { .input_seg = xhci->event_ring->first_seg,
1978 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1979 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1980 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
1981 .result_seg = NULL,
1982 },
1983 };
1984
1985 unsigned int num_tests;
1986 int i, ret;
1987
Kulikov Vasiliye10fa472010-06-28 15:55:46 +04001988 num_tests = ARRAY_SIZE(simple_test_vector);
Sarah Sharp6648f292009-11-09 13:35:23 -08001989 for (i = 0; i < num_tests; i++) {
1990 ret = xhci_test_trb_in_td(xhci,
1991 xhci->event_ring->first_seg,
1992 xhci->event_ring->first_seg->trbs,
1993 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1994 simple_test_vector[i].input_dma,
1995 simple_test_vector[i].result_seg,
1996 "Simple", i);
1997 if (ret < 0)
1998 return ret;
1999 }
2000
Kulikov Vasiliye10fa472010-06-28 15:55:46 +04002001 num_tests = ARRAY_SIZE(complex_test_vector);
Sarah Sharp6648f292009-11-09 13:35:23 -08002002 for (i = 0; i < num_tests; i++) {
2003 ret = xhci_test_trb_in_td(xhci,
2004 complex_test_vector[i].input_seg,
2005 complex_test_vector[i].start_trb,
2006 complex_test_vector[i].end_trb,
2007 complex_test_vector[i].input_dma,
2008 complex_test_vector[i].result_seg,
2009 "Complex", i);
2010 if (ret < 0)
2011 return ret;
2012 }
2013 xhci_dbg(xhci, "TRB math tests passed.\n");
2014 return 0;
2015}
2016
Sarah Sharp257d5852010-07-29 22:12:56 -07002017static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
2018{
2019 u64 temp;
2020 dma_addr_t deq;
2021
2022 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2023 xhci->event_ring->dequeue);
2024 if (deq == 0 && !in_interrupt())
2025 xhci_warn(xhci, "WARN something wrong with SW event ring "
2026 "dequeue ptr.\n");
2027 /* Update HC event ring dequeue pointer */
2028 temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2029 temp &= ERST_PTR_MASK;
2030 /* Don't clear the EHB bit (which is RW1C) because
2031 * there might be more events to service.
2032 */
2033 temp &= ~ERST_EHB;
2034 xhci_dbg(xhci, "// Write event ring dequeue pointer, "
2035 "preserving EHB bit\n");
2036 xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
2037 &xhci->ir_set->erst_dequeue);
2038}
2039
Sarah Sharpda6699c2010-10-26 16:47:13 -07002040static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
Matt Evans28ccd292011-03-29 13:40:46 +11002041 __le32 __iomem *addr, u8 major_revision)
Sarah Sharpda6699c2010-10-26 16:47:13 -07002042{
2043 u32 temp, port_offset, port_count;
2044 int i;
2045
2046 if (major_revision > 0x03) {
2047 xhci_warn(xhci, "Ignoring unknown port speed, "
2048 "Ext Cap %p, revision = 0x%x\n",
2049 addr, major_revision);
2050 /* Ignoring port protocol we can't understand. FIXME */
2051 return;
2052 }
2053
2054 /* Port offset and count in the third dword, see section 7.2 */
2055 temp = xhci_readl(xhci, addr + 2);
2056 port_offset = XHCI_EXT_PORT_OFF(temp);
2057 port_count = XHCI_EXT_PORT_COUNT(temp);
2058 xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
2059 "count = %u, revision = 0x%x\n",
2060 addr, port_offset, port_count, major_revision);
2061 /* Port count includes the current port offset */
2062 if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
2063 /* WTF? "Valid values are ‘1’ to MaxPorts" */
2064 return;
Andiry Xufc71ff72011-09-23 14:19:51 -07002065
2066 /* Check the host's USB2 LPM capability */
2067 if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
2068 (temp & XHCI_L1C)) {
2069 xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
2070 xhci->sw_lpm_support = 1;
2071 }
2072
2073 if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
2074 xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
2075 xhci->sw_lpm_support = 1;
2076 if (temp & XHCI_HLC) {
2077 xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
2078 xhci->hw_lpm_support = 1;
2079 }
2080 }
2081
Sarah Sharpda6699c2010-10-26 16:47:13 -07002082 port_offset--;
2083 for (i = port_offset; i < (port_offset + port_count); i++) {
2084 /* Duplicate entry. Ignore the port if the revisions differ. */
2085 if (xhci->port_array[i] != 0) {
2086 xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
2087 " port %u\n", addr, i);
2088 xhci_warn(xhci, "Port was marked as USB %u, "
2089 "duplicated as USB %u\n",
2090 xhci->port_array[i], major_revision);
2091 /* Only adjust the roothub port counts if we haven't
2092 * found a similar duplicate.
2093 */
2094 if (xhci->port_array[i] != major_revision &&
Dan Carpenter22e04872011-03-17 22:39:49 +03002095 xhci->port_array[i] != DUPLICATE_ENTRY) {
Sarah Sharpda6699c2010-10-26 16:47:13 -07002096 if (xhci->port_array[i] == 0x03)
2097 xhci->num_usb3_ports--;
2098 else
2099 xhci->num_usb2_ports--;
Dan Carpenter22e04872011-03-17 22:39:49 +03002100 xhci->port_array[i] = DUPLICATE_ENTRY;
Sarah Sharpda6699c2010-10-26 16:47:13 -07002101 }
2102 /* FIXME: Should we disable the port? */
Sarah Sharpf8bbeab2010-12-09 10:29:00 -08002103 continue;
Sarah Sharpda6699c2010-10-26 16:47:13 -07002104 }
2105 xhci->port_array[i] = major_revision;
2106 if (major_revision == 0x03)
2107 xhci->num_usb3_ports++;
2108 else
2109 xhci->num_usb2_ports++;
2110 }
2111 /* FIXME: Should we disable ports not in the Extended Capabilities? */
2112}
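/*
 * Illustrative worked example (not part of the original driver): if the
 * capability's third dword reads 0x00000205, then per section 7.2
 * port_offset = 5 and port_count = 2, so roothub ports 5 and 6
 * (port_array indices 4 and 5) are tagged with this capability's major
 * revision; a major revision of 0x03 bumps num_usb3_ports by two.  The bit
 * layout assumed here is port offset in bits 7:0 and port count in bits 15:8.
 */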
2113
2114/*
2115 * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
2116 * specify what speeds each port is supposed to be. We can't count on the port
2117 * speed bits in the PORTSC register being correct until a device is connected,
2118 * but we need to set up the two fake roothubs with the correct number of USB
2119 * 3.0 and USB 2.0 ports at host controller initialization time.
2120 */
2121static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2122{
Matt Evans28ccd292011-03-29 13:40:46 +11002123 __le32 __iomem *addr;
Sarah Sharpda6699c2010-10-26 16:47:13 -07002124 u32 offset;
2125 unsigned int num_ports;
Sarah Sharp2e279802011-09-02 11:05:50 -07002126 int i, j, port_index;
Sarah Sharpda6699c2010-10-26 16:47:13 -07002127
2128 addr = &xhci->cap_regs->hcc_params;
2129 offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
2130 if (offset == 0) {
2131 xhci_err(xhci, "No Extended Capability registers, "
2132 "unable to set up roothub.\n");
2133 return -ENODEV;
2134 }
2135
2136 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2137 xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
2138 if (!xhci->port_array)
2139 return -ENOMEM;
2140
Sarah Sharp839c8172011-09-02 11:05:47 -07002141 xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
2142 if (!xhci->rh_bw)
2143 return -ENOMEM;
Sarah Sharp2e279802011-09-02 11:05:50 -07002144 for (i = 0; i < num_ports; i++) {
2145 struct xhci_interval_bw_table *bw_table;
2146
Sarah Sharp839c8172011-09-02 11:05:47 -07002147 INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
Sarah Sharp2e279802011-09-02 11:05:50 -07002148 bw_table = &xhci->rh_bw[i].bw_table;
2149 for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2150 INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2151 }
Sarah Sharp839c8172011-09-02 11:05:47 -07002152
Sarah Sharpda6699c2010-10-26 16:47:13 -07002153 /*
2154 * For whatever reason, the first capability offset is from the
2155 * capability register base, not from the HCCPARAMS register.
2156 * See section 5.3.6 for offset calculation.
2157 */
2158 addr = &xhci->cap_regs->hc_capbase + offset;
2159 while (1) {
2160 u32 cap_id;
2161
2162 cap_id = xhci_readl(xhci, addr);
2163 if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
2164 xhci_add_in_port(xhci, num_ports, addr,
2165 (u8) XHCI_EXT_PORT_MAJOR(cap_id));
2166 offset = XHCI_EXT_CAPS_NEXT(cap_id);
2167 if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
2168 == num_ports)
2169 break;
2170 /*
2171 * Once you're into the Extended Capabilities, the offset is
2172 * always relative to the register holding the offset.
2173 */
2174 addr += offset;
2175 }
2176
2177 if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
2178 xhci_warn(xhci, "No ports on the roothubs?\n");
2179 return -ENODEV;
2180 }
2181 xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
2182 xhci->num_usb2_ports, xhci->num_usb3_ports);
Sarah Sharpd30b2a22010-11-23 10:42:22 -08002183
2184 /* Place limits on the number of roothub ports so that the hub
2185 * descriptors aren't longer than the USB core will allocate.
2186 */
2187 if (xhci->num_usb3_ports > 15) {
2188 xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
2189 xhci->num_usb3_ports = 15;
2190 }
2191 if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
2192 xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
2193 USB_MAXCHILDREN);
2194 xhci->num_usb2_ports = USB_MAXCHILDREN;
2195 }
2196
Sarah Sharpda6699c2010-10-26 16:47:13 -07002197 /*
2198 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
2199 * Not sure how the USB core will handle a hub with no ports...
2200 */
2201 if (xhci->num_usb2_ports) {
2202 xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
2203 xhci->num_usb2_ports, flags);
2204 if (!xhci->usb2_ports)
2205 return -ENOMEM;
2206
2207 port_index = 0;
Sarah Sharpf8bbeab2010-12-09 10:29:00 -08002208 for (i = 0; i < num_ports; i++) {
2209 if (xhci->port_array[i] == 0x03 ||
2210 xhci->port_array[i] == 0 ||
Dan Carpenter22e04872011-03-17 22:39:49 +03002211 xhci->port_array[i] == DUPLICATE_ENTRY)
Sarah Sharpf8bbeab2010-12-09 10:29:00 -08002212 continue;
2213
2214 xhci->usb2_ports[port_index] =
2215 &xhci->op_regs->port_status_base +
2216 NUM_PORT_REGS*i;
2217 xhci_dbg(xhci, "USB 2.0 port at index %u, "
2218 "addr = %p\n", i,
2219 xhci->usb2_ports[port_index]);
2220 port_index++;
Sarah Sharpd30b2a22010-11-23 10:42:22 -08002221 if (port_index == xhci->num_usb2_ports)
2222 break;
Sarah Sharpf8bbeab2010-12-09 10:29:00 -08002223 }
Sarah Sharpda6699c2010-10-26 16:47:13 -07002224 }
2225 if (xhci->num_usb3_ports) {
2226 xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
2227 xhci->num_usb3_ports, flags);
2228 if (!xhci->usb3_ports)
2229 return -ENOMEM;
2230
2231 port_index = 0;
2232 for (i = 0; i < num_ports; i++)
2233 if (xhci->port_array[i] == 0x03) {
2234 xhci->usb3_ports[port_index] =
2235 &xhci->op_regs->port_status_base +
2236 NUM_PORT_REGS*i;
2237 xhci_dbg(xhci, "USB 3.0 port at index %u, "
2238 "addr = %p\n", i,
2239 xhci->usb3_ports[port_index]);
2240 port_index++;
Sarah Sharpd30b2a22010-11-23 10:42:22 -08002241 if (port_index == xhci->num_usb3_ports)
2242 break;
Sarah Sharpda6699c2010-10-26 16:47:13 -07002243 }
2244 }
2245 return 0;
2246}
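/*
 * Illustrative worked example (not part of the original driver): on a
 * four-port host whose first two ports are USB 2.0 and last two are
 * USB 3.0, port_array ends up as {0x02, 0x02, 0x03, 0x03}, usb2_ports[]
 * points at the PORTSC registers for indices 0 and 1, and usb3_ports[]
 * points at those for indices 2 and 3 (each at
 * &op_regs->port_status_base + NUM_PORT_REGS * i).
 */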
Sarah Sharp6648f292009-11-09 13:35:23 -08002247
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002248int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2249{
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002250 dma_addr_t dma;
2251 struct device *dev = xhci_to_hcd(xhci)->self.controller;
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002252 unsigned int val, val2;
Sarah Sharp8e595a52009-07-27 12:03:31 -07002253 u64 val_64;
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002254 struct xhci_segment *seg;
Sarah Sharp623bef92011-11-11 14:57:33 -08002255 u32 page_size, temp;
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002256 int i;
2257
2258 page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
2259 xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
2260 for (i = 0; i < 16; i++) {
2261 if ((0x1 & page_size) != 0)
2262 break;
2263 page_size = page_size >> 1;
2264 }
2265 if (i < 16)
2266 xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
2267 else
2268 xhci_warn(xhci, "WARN: no supported page size\n");
2269 /* Use 4K pages, since that's common and the minimum the HC supports */
2270 xhci->page_shift = 12;
2271 xhci->page_size = 1 << xhci->page_shift;
2272 xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
2273
2274 /*
2275 * Program the Number of Device Slots Enabled field in the CONFIG
2276 * register with the max value of slots the HC can handle.
2277 */
2278 val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
2279 xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
2280 (unsigned int) val);
2281 val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
2282 val |= (val2 & ~HCS_SLOTS_MASK);
2283 xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
2284 (unsigned int) val);
2285 xhci_writel(xhci, val, &xhci->op_regs->config_reg);
2286
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002287 /*
Sarah Sharpa74588f2009-04-27 19:53:42 -07002288 * The Device Context Base Address Array (DCBAA) must be
2289 * "physically contiguous and 64-byte (cache line) aligned".
2290 */
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07002291 xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
2292 GFP_KERNEL);
Sarah Sharpa74588f2009-04-27 19:53:42 -07002293 if (!xhci->dcbaa)
2294 goto fail;
2295 memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
2296 xhci->dcbaa->dma = dma;
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002297 xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
2298 (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
Sarah Sharp8e595a52009-07-27 12:03:31 -07002299 xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
Sarah Sharpa74588f2009-04-27 19:53:42 -07002300
2301 /*
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002302 * Initialize the ring segment pool. The ring must be a contiguous
2303 * structure composed of TRBs. TRBs themselves need only 16-byte alignment;
2304 * the command ring segment, however, needs 64-byte aligned segments,
2305 * so we pick the greater alignment need.
2306 */
2307 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
David Howellseb8ccd22013-03-28 18:48:35 +00002308 TRB_SEGMENT_SIZE, 64, xhci->page_size);
John Yound115b042009-07-27 12:05:15 -07002309
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002310 /* See Table 46 and Note on Figure 55 */
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002311 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
John Yound115b042009-07-27 12:05:15 -07002312 2112, 64, xhci->page_size);
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002313 if (!xhci->segment_pool || !xhci->device_pool)
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002314 goto fail;
2315
Sarah Sharp8df75f42010-04-02 15:34:16 -07002316 /* Linear stream context arrays don't have any boundary restrictions,
2317 * and only need to be 16-byte aligned.
2318 */
2319 xhci->small_streams_pool =
2320 dma_pool_create("xHCI 256 byte stream ctx arrays",
2321 dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
2322 xhci->medium_streams_pool =
2323 dma_pool_create("xHCI 1KB stream ctx arrays",
2324 dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
2325 /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07002326 * will be allocated with dma_alloc_coherent()
Sarah Sharp8df75f42010-04-02 15:34:16 -07002327 */
2328
2329 if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
2330 goto fail;
2331
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002332 /* Set up the command ring to have one segment for now. */
Andiry Xu186a7ef2012-03-05 17:49:36 +08002333 xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002334 if (!xhci->cmd_ring)
2335 goto fail;
Elric Fub92cc662012-06-27 16:31:12 +08002336 INIT_LIST_HEAD(&xhci->cancel_cmd_list);
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002337 xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
2338 xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
2339 (unsigned long long)xhci->cmd_ring->first_seg->dma);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002340
2341 /* Set the address in the Command Ring Control register */
Sarah Sharp8e595a52009-07-27 12:03:31 -07002342 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2343 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2344 (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002345 xhci->cmd_ring->cycle_state;
Sarah Sharp8e595a52009-07-27 12:03:31 -07002346 xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", (unsigned long long) val_64);
2347 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002348 xhci_dbg_cmd_ptrs(xhci);
2349
Sarah Sharpdbc33302012-05-08 07:32:03 -07002350 xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
2351 if (!xhci->lpm_command)
2352 goto fail;
2353
2354 /* Reserve one command ring TRB for disabling LPM.
2355 * Since the USB core grabs the shared usb_bus bandwidth mutex before
2356 * disabling LPM, we only need to reserve one TRB for all devices.
2357 */
2358 xhci->cmd_ring_reserved_trbs++;
2359
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002360 val = xhci_readl(xhci, &xhci->cap_regs->db_off);
2361 val &= DBOFF_MASK;
2362 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
2363 " from cap regs base addr\n", val);
Dmitry Torokhovc50a00f2011-02-08 16:29:34 -08002364 xhci->dba = (void __iomem *) xhci->cap_regs + val;
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002365 xhci_dbg_regs(xhci);
2366 xhci_print_run_regs(xhci);
2367 /* Set ir_set to interrupt register set 0 */
Dmitry Torokhovc50a00f2011-02-08 16:29:34 -08002368 xhci->ir_set = &xhci->run_regs->ir_set[0];
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002369
2370 /*
2371 * Event ring setup: Allocate a normal ring, but also setup
2372 * the event ring segment table (ERST). Section 4.9.3.
2373 */
2374 xhci_dbg(xhci, "// Allocating event ring\n");
Andiry Xu186a7ef2012-03-05 17:49:36 +08002375 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
Andiry Xu7e393a82011-09-23 14:19:54 -07002376 flags);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002377 if (!xhci->event_ring)
2378 goto fail;
Sarah Sharp6648f292009-11-09 13:35:23 -08002379 if (xhci_check_trb_in_td_math(xhci, flags) < 0)
2380 goto fail;
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002381
Sebastian Andrzej Siewior22d45f02011-09-23 14:19:59 -07002382 xhci->erst.entries = dma_alloc_coherent(dev,
2383 sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
2384 GFP_KERNEL);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002385 if (!xhci->erst.entries)
2386 goto fail;
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002387 xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
2388 (unsigned long long)dma);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002389
2390 memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
2391 xhci->erst.num_entries = ERST_NUM_SEGS;
2392 xhci->erst.erst_dma_addr = dma;
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002393 xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002394 xhci->erst.num_entries,
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002395 xhci->erst.entries,
2396 (unsigned long long)xhci->erst.erst_dma_addr);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002397
2398 /* set ring base address and size for each segment table entry */
2399 for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
2400 struct xhci_erst_entry *entry = &xhci->erst.entries[val];
Matt Evans28ccd292011-03-29 13:40:46 +11002401 entry->seg_addr = cpu_to_le64(seg->dma);
2402 entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002403 entry->rsvd = 0;
2404 seg = seg->next;
2405 }
2406
2407 /* set ERST count with the number of entries in the segment table */
2408 val = xhci_readl(xhci, &xhci->ir_set->erst_size);
2409 val &= ERST_SIZE_MASK;
2410 val |= ERST_NUM_SEGS;
2411 xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
2412 val);
2413 xhci_writel(xhci, val, &xhci->ir_set->erst_size);
2414
2415 xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
2416 /* set the segment table base address */
Greg Kroah-Hartman700e2052009-04-29 19:14:08 -07002417 xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
2418 (unsigned long long)xhci->erst.erst_dma_addr);
Sarah Sharp8e595a52009-07-27 12:03:31 -07002419 val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2420 val_64 &= ERST_PTR_MASK;
2421 val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
2422 xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002423
2424 /* Set the event ring dequeue address */
Sarah Sharp23e3be12009-04-29 19:05:20 -07002425 xhci_set_hc_event_deq(xhci);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002426 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
Dmitry Torokhov09ece302011-02-08 16:29:33 -08002427 xhci_print_ir_set(xhci, 0);
Sarah Sharp0ebbab32009-04-27 19:52:34 -07002428
2429 /*
2430 * XXX: Might need to set the Interrupter Moderation Register to
2431 * something other than the default (~1ms minimum between interrupts).
2432 * See section 5.5.1.2.
2433 */
Sarah Sharp3ffbba92009-04-27 19:57:38 -07002434 init_completion(&xhci->addr_dev);
2435 for (i = 0; i < MAX_HC_SLOTS; ++i)
Randy Dunlap326b4812010-04-19 08:53:50 -07002436 xhci->devs[i] = NULL;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08002437 for (i = 0; i < USB_MAXCHILDREN; ++i) {
Sarah Sharp20b67cf2010-12-15 12:47:14 -08002438 xhci->bus_state[0].resume_done[i] = 0;
Sarah Sharpf6ff0ac2010-12-16 11:21:10 -08002439 xhci->bus_state[1].resume_done[i] = 0;
2440 }
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002441
John Youn254c80a2009-07-27 12:05:03 -07002442 if (scratchpad_alloc(xhci, flags))
2443 goto fail;
Sarah Sharpda6699c2010-10-26 16:47:13 -07002444 if (xhci_setup_port_arrays(xhci, flags))
2445 goto fail;
John Youn254c80a2009-07-27 12:05:03 -07002446
Andiry Xu95743232011-09-23 14:19:51 -07002447 INIT_LIST_HEAD(&xhci->lpm_failed_devs);
2448
Sarah Sharp623bef92011-11-11 14:57:33 -08002449 /* Enable USB 3.0 device notifications for function remote wake, which
2450 * is necessary for allowing USB 3.0 devices to do remote wakeup from
2451 * U3 (device suspend).
2452 */
2453 temp = xhci_readl(xhci, &xhci->op_regs->dev_notification);
2454 temp &= ~DEV_NOTE_MASK;
2455 temp |= DEV_NOTE_FWAKE;
2456 xhci_writel(xhci, temp, &xhci->op_regs->dev_notification);
2457
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002458 return 0;
John Youn254c80a2009-07-27 12:05:03 -07002459
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002460fail:
2461 xhci_warn(xhci, "Couldn't initialize memory\n");
Sarah Sharp159e1fc2012-03-16 13:09:39 -07002462 xhci_halt(xhci);
2463 xhci_reset(xhci);
Sarah Sharp66d4ead2009-04-27 19:52:28 -07002464 xhci_mem_cleanup(xhci);
2465 return -ENOMEM;
2466}
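/*
 * Illustrative summary (not part of the original driver): xhci_mem_init()
 * above builds the controller's data structures in roughly this order --
 * DCBAA, ring segment and device context DMA pools, stream context pools,
 * command ring (plus CRCR programming and the reserved LPM TRB), doorbell
 * and runtime register pointers, event ring with its ERST, scratchpad
 * buffers, and finally the roothub port arrays and device notification
 * enable.  Any failure falls through to xhci_halt()/xhci_reset() and
 * xhci_mem_cleanup(), which releases everything in roughly the reverse order.
 */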