/*
 * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
36
/*
 * This file contains the functions to manage Queue Heads and Queue
 * Transfer Descriptors for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

/* Wait this long before releasing periodic reservation */
#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
58
59/**
Douglas Andersonb951c6c2016-01-28 18:20:05 -080060 * dwc2_periodic_channel_available() - Checks that a channel is available for a
61 * periodic transfer
62 *
63 * @hsotg: The HCD state structure for the DWC OTG controller
64 *
65 * Return: 0 if successful, negative error code otherwise
66 */
67static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
68{
69 /*
70 * Currently assuming that there is a dedicated host channel for
71 * each periodic transaction plus at least one host channel for
72 * non-periodic transactions
73 */
74 int status;
75 int num_channels;
76
77 num_channels = hsotg->core_params->host_channels;
78 if (hsotg->periodic_channels + hsotg->non_periodic_channels <
79 num_channels
80 && hsotg->periodic_channels < num_channels - 1) {
81 status = 0;
82 } else {
83 dev_dbg(hsotg->dev,
84 "%s: Total channels: %d, Periodic: %d, "
85 "Non-periodic: %d\n", __func__, num_channels,
86 hsotg->periodic_channels, hsotg->non_periodic_channels);
87 status = -ENOSPC;
88 }
89
90 return status;
91}
92
93/**
94 * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
95 * for the specified QH in the periodic schedule
96 *
97 * @hsotg: The HCD state structure for the DWC OTG controller
98 * @qh: QH containing periodic bandwidth required
99 *
100 * Return: 0 if successful, negative error code otherwise
101 *
102 * For simplicity, this calculation assumes that all the transfers in the
103 * periodic schedule may occur in the same (micro)frame
104 */
105static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
106 struct dwc2_qh *qh)
107{
108 int status;
109 s16 max_claimed_usecs;
110
111 status = 0;
112
113 if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
114 /*
115 * High speed mode
116 * Max periodic usecs is 80% x 125 usec = 100 usec
117 */
118 max_claimed_usecs = 100 - qh->host_us;
119 } else {
120 /*
121 * Full speed mode
122 * Max periodic usecs is 90% x 1000 usec = 900 usec
123 */
124 max_claimed_usecs = 900 - qh->host_us;
125 }
126
127 if (hsotg->periodic_usecs > max_claimed_usecs) {
128 dev_err(hsotg->dev,
129 "%s: already claimed usecs %d, required usecs %d\n",
130 __func__, hsotg->periodic_usecs, qh->host_us);
131 status = -ENOSPC;
132 }
133
134 return status;
135}
136
/*
 * Microframe scheduler
 * track the total use in hsotg->frame_usecs
 * keep each qh use in qh->frame_usecs
 * when surrendering the qh then donate the time back
 */

/*
 * Per-microframe usec budget used to seed hsotg->frame_usecs.
 * NOTE(review): uframes 0-5 get 100 us each; uframe 6 only 30 us and
 * uframe 7 nothing — presumably to leave headroom at the end of the
 * frame (e.g. for non-periodic traffic); confirm against the scheduler
 * design before relying on this.
 */
static const unsigned short max_uframe_usecs[] = {
	100, 100, 100, 100, 100, 100, 30, 0
};
146
147void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
148{
149 int i;
150
151 for (i = 0; i < 8; i++)
152 hsotg->frame_usecs[i] = max_uframe_usecs[i];
153}
154
155static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
156{
157 unsigned short utime = qh->host_us;
158 int i;
159
160 for (i = 0; i < 8; i++) {
161 /* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
162 if (utime <= hsotg->frame_usecs[i]) {
163 hsotg->frame_usecs[i] -= utime;
164 qh->frame_usecs[i] += utime;
165 return i;
166 }
167 }
168 return -ENOSPC;
169}
170
/*
 * use this for FS apps that can span multiple uframes
 *
 * Tries to claim qh->host_us worth of time spread over a run of
 * consecutive microframes starting at i.  On success the claimed time is
 * moved from hsotg->frame_usecs[] into qh->frame_usecs[] and the index of
 * the first microframe used is returned; -ENOSPC if no run fits.
 */
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->host_us;	/* total time still needed */
	unsigned short xtime;			/* time accumulated so far */
	int t_left;
	int i;
	int j;
	int k;

	for (i = 0; i < 8; i++) {
		/* Skip start candidates with no time left at all */
		if (hsotg->frame_usecs[i] <= 0)
			continue;

		/*
		 * we need n consecutive slots so use j as a start slot
		 * j plus j+1 must be enough time (for now)
		 */
		xtime = hsotg->frame_usecs[i];
		for (j = i + 1; j < 8; j++) {
			/*
			 * if we add this frame remaining time to xtime we may
			 * be OK, if not we need to test j for a complete frame
			 */
			if (xtime + hsotg->frame_usecs[j] < utime) {
				if (hsotg->frame_usecs[j] <
						max_uframe_usecs[j])
					continue;
			}
			if (xtime >= utime) {
				/*
				 * Enough time found: walk from i again and
				 * actually claim it, microframe by microframe.
				 * t_left goes <= 0 in the last one touched.
				 */
				t_left = utime;
				for (k = i; k < 8; k++) {
					t_left -= hsotg->frame_usecs[k];
					if (t_left <= 0) {
						/* Partial claim; keep -t_left */
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k]
								+ t_left;
						hsotg->frame_usecs[k] = -t_left;
						return i;
					} else {
						/* Consume this uframe fully */
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k];
						hsotg->frame_usecs[k] = 0;
					}
				}
			}
			/* add the frame time to x time */
			xtime += hsotg->frame_usecs[j];
			/* we must have a fully available next frame or break */
			if (xtime < utime &&
			    hsotg->frame_usecs[j] == max_uframe_usecs[j])
				continue;
		}
	}
	return -ENOSPC;
}
229
230static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
231{
232 int ret;
233
234 if (qh->dev_speed == USB_SPEED_HIGH) {
235 /* if this is a hs transaction we need a full frame */
236 ret = dwc2_find_single_uframe(hsotg, qh);
237 } else {
238 /*
239 * if this is a fs transaction we may need a sequence
240 * of frames
241 */
242 ret = dwc2_find_multi_uframe(hsotg, qh);
243 }
244 return ret;
245}
246
247/**
Douglas Anderson2d3f1392016-01-28 18:20:06 -0800248 * dwc2_do_reserve() - Make a periodic reservation
249 *
250 * Try to allocate space in the periodic schedule. Depending on parameters
251 * this might use the microframe scheduler or the dumb scheduler.
252 *
253 * @hsotg: The HCD state structure for the DWC OTG controller
254 * @qh: QH for the periodic transfer.
255 *
256 * Returns: 0 upon success; error upon failure.
257 */
258static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
259{
260 int status;
261
262 if (hsotg->core_params->uframe_sched > 0) {
263 int frame = -1;
264
265 status = dwc2_find_uframe(hsotg, qh);
266 if (status == 0)
267 frame = 7;
268 else if (status > 0)
269 frame = status - 1;
270
271 /* Set the new frame up */
272 if (frame >= 0) {
273 qh->next_active_frame &= ~0x7;
274 qh->next_active_frame |= (frame & 7);
275 dwc2_sch_dbg(hsotg,
276 "QH=%p sched_p nxt=%04x, uf=%d\n",
277 qh, qh->next_active_frame, frame);
278 }
279
280 if (status > 0)
281 status = 0;
282 } else {
283 status = dwc2_periodic_channel_available(hsotg);
284 if (status) {
285 dev_info(hsotg->dev,
286 "%s: No host channel available for periodic transfer\n",
287 __func__);
288 return status;
289 }
290
291 status = dwc2_check_periodic_bandwidth(hsotg, qh);
292 }
293
294 if (status) {
295 dev_dbg(hsotg->dev,
296 "%s: Insufficient periodic bandwidth for periodic transfer\n",
297 __func__);
298 return status;
299 }
300
301 if (hsotg->core_params->uframe_sched <= 0)
302 /* Reserve periodic channel */
303 hsotg->periodic_channels++;
304
305 /* Update claimed usecs per (micro)frame */
306 hsotg->periodic_usecs += qh->host_us;
307
308 return 0;
309}
310
311/**
Douglas Anderson17dd5b62016-01-28 18:19:59 -0800312 * dwc2_do_unreserve() - Actually release the periodic reservation
313 *
314 * This function actually releases the periodic bandwidth that was reserved
315 * by the given qh.
316 *
317 * @hsotg: The HCD state structure for the DWC OTG controller
318 * @qh: QH for the periodic transfer.
319 */
320static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
321{
322 assert_spin_locked(&hsotg->lock);
323
324 WARN_ON(!qh->unreserve_pending);
325
326 /* No more unreserve pending--we're doing it */
327 qh->unreserve_pending = false;
328
329 if (WARN_ON(!list_empty(&qh->qh_list_entry)))
330 list_del_init(&qh->qh_list_entry);
331
332 /* Update claimed usecs per (micro)frame */
Douglas Andersonced9eee2016-01-28 18:20:04 -0800333 hsotg->periodic_usecs -= qh->host_us;
Douglas Anderson17dd5b62016-01-28 18:19:59 -0800334
335 if (hsotg->core_params->uframe_sched > 0) {
336 int i;
337
338 for (i = 0; i < 8; i++) {
339 hsotg->frame_usecs[i] += qh->frame_usecs[i];
340 qh->frame_usecs[i] = 0;
341 }
342 } else {
343 /* Release periodic channel reservation */
344 hsotg->periodic_channels--;
345 }
346}
347
/**
 * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
 *
 * According to the kernel doc for usb_submit_urb() (specifically the part about
 * "Reserved Bandwidth Transfers"), we need to keep a reservation active as
 * long as a device driver keeps submitting. Since we're using HCD_BH to give
 * back the URB we need to give the driver a little bit of time before we
 * release the reservation. This timer function is called after the
 * appropriate delay.
 *
 * @data: The QH to unreserve, cast to an unsigned long (timer callback
 * argument set up in dwc2_qh_init()).
 */
static void dwc2_unreserve_timer_fn(unsigned long data)
{
	struct dwc2_qh *qh = (struct dwc2_qh *)data;
	struct dwc2_hsotg *hsotg = qh->hsotg;
	unsigned long flags;

	/*
	 * Wait for the lock, or for us to be scheduled again. We
	 * could be scheduled again if:
	 * - We started executing but didn't get the lock yet.
	 * - A new reservation came in, but cancel didn't take effect
	 *   because we already started executing.
	 * - The timer has been kicked again.
	 * In that case cancel and wait for the next call.
	 */
	while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
		if (timer_pending(&qh->unreserve_timer))
			return;
	}

	/*
	 * Might be no more unreserve pending if:
	 * - We started executing but didn't get the lock yet.
	 * - A new reservation came in, but cancel didn't take effect
	 *   because we already started executing.
	 *
	 * We can't put this in the loop above because unreserve_pending needs
	 * to be accessed under lock, so we can only check it once we got the
	 * lock.
	 */
	if (qh->unreserve_pending)
		dwc2_do_unreserve(hsotg, qh);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}
395
Paul Zimmerman7359d482013-03-11 17:47:59 -0700396/**
Douglas Andersonb951c6c2016-01-28 18:20:05 -0800397 * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
398 * host channel is large enough to handle the maximum data transfer in a single
399 * (micro)frame for a periodic transfer
400 *
401 * @hsotg: The HCD state structure for the DWC OTG controller
402 * @qh: QH for a periodic endpoint
403 *
404 * Return: 0 if successful, negative error code otherwise
405 */
406static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
407 struct dwc2_qh *qh)
408{
409 u32 max_xfer_size;
410 u32 max_channel_xfer_size;
411 int status = 0;
412
413 max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
414 max_channel_xfer_size = hsotg->core_params->max_transfer_size;
415
416 if (max_xfer_size > max_channel_xfer_size) {
417 dev_err(hsotg->dev,
418 "%s: Periodic xfer length %d > max xfer length for channel %d\n",
419 __func__, max_xfer_size, max_channel_xfer_size);
420 status = -ENOSPC;
421 }
422
423 return status;
424}
425
426/**
427 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
428 * the periodic schedule
429 *
430 * @hsotg: The HCD state structure for the DWC OTG controller
431 * @qh: QH for the periodic transfer. The QH should already contain the
432 * scheduling information.
433 *
434 * Return: 0 if successful, negative error code otherwise
435 */
436static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
437{
438 int status;
439
440 status = dwc2_check_max_xfer_size(hsotg, qh);
441 if (status) {
442 dev_dbg(hsotg->dev,
443 "%s: Channel max transfer size too small for periodic transfer\n",
444 __func__);
445 return status;
446 }
447
448 /* Cancel pending unreserve; if canceled OK, unreserve was pending */
449 if (del_timer(&qh->unreserve_timer))
450 WARN_ON(!qh->unreserve_pending);
451
452 /*
453 * Only need to reserve if there's not an unreserve pending, since if an
454 * unreserve is pending then by definition our old reservation is still
455 * valid. Unreserve might still be pending even if we didn't cancel if
456 * dwc2_unreserve_timer_fn() already started. Code in the timer handles
457 * that case.
458 */
459 if (!qh->unreserve_pending) {
Douglas Anderson2d3f1392016-01-28 18:20:06 -0800460 status = dwc2_do_reserve(hsotg, qh);
461 if (status)
Douglas Andersonb951c6c2016-01-28 18:20:05 -0800462 return status;
Douglas Andersonb951c6c2016-01-28 18:20:05 -0800463 }
464
465 qh->unreserve_pending = 0;
466
467 if (hsotg->core_params->dma_desc_enable > 0)
468 /* Don't rely on SOF and start in ready schedule */
469 list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
470 else
471 /* Always start in inactive schedule */
472 list_add_tail(&qh->qh_list_entry,
473 &hsotg->periodic_sched_inactive);
474
Douglas Anderson2d3f1392016-01-28 18:20:06 -0800475 return 0;
Douglas Andersonb951c6c2016-01-28 18:20:05 -0800476}
477
478/**
479 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
480 * from the periodic schedule
481 *
482 * @hsotg: The HCD state structure for the DWC OTG controller
483 * @qh: QH for the periodic transfer
484 */
485static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
486 struct dwc2_qh *qh)
487{
488 bool did_modify;
489
490 assert_spin_locked(&hsotg->lock);
491
492 /*
493 * Schedule the unreserve to happen in a little bit. Cases here:
494 * - Unreserve worker might be sitting there waiting to grab the lock.
495 * In this case it will notice it's been schedule again and will
496 * quit.
497 * - Unreserve worker might not be scheduled.
498 *
499 * We should never already be scheduled since dwc2_schedule_periodic()
500 * should have canceled the scheduled unreserve timer (hence the
501 * warning on did_modify).
502 *
503 * We add + 1 to the timer to guarantee that at least 1 jiffy has
504 * passed (otherwise if the jiffy counter might tick right after we
505 * read it and we'll get no delay).
506 */
507 did_modify = mod_timer(&qh->unreserve_timer,
508 jiffies + DWC2_UNRESERVE_DELAY + 1);
509 WARN_ON(did_modify);
510 qh->unreserve_pending = 1;
511
512 list_del_init(&qh->qh_list_entry);
513}
514
/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 * @urb: Holds the information about the device/endpoint needed to initialize
 * the QH
 */
#define SCHEDULE_SLOP 10
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb)
{
	int dev_speed, hub_addr, hub_port;
	char *speed, *type;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Initialize QH */
	qh->hsotg = hsotg;
	/* Timer that releases the periodic bandwidth reservation later */
	setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
		    (unsigned long)qh);
	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);

	/*
	 * A LS/FS device behind a real HS hub (addr not 0/root) needs
	 * split transactions through the hub's TT.
	 */
	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
	    hub_addr != 0 && hub_addr != 1) {
		dev_vdbg(hsotg->dev,
			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
			 hub_port);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them */
		u32 hprt, prtspd;

		/* Todo: Account for split transfers in the bus time */
		int bytecount =
			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

		/* Bus time per (micro)frame, in microseconds */
		qh->host_us = NS_TO_US(usb_calc_bus_time(qh->do_split ?
			      USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
			      qh->ep_type == USB_ENDPOINT_XFER_ISOC,
			      bytecount));

		/* Ensure frame_number corresponds to the reality */
		hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
		/* Start in a slightly future (micro)frame */
		qh->next_active_frame = dwc2_frame_num_inc(hsotg->frame_number,
							   SCHEDULE_SLOP);
		qh->host_interval = urb->interval;
		dwc2_sch_dbg(hsotg, "QH=%p init nxt=%04x, fn=%04x, int=%#x\n",
			     qh, qh->next_active_frame, hsotg->frame_number,
			     qh->host_interval);
#if 0
		/* Increase interrupt polling rate for debugging */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
			qh->host_interval = 8;
#endif
		hprt = dwc2_readl(hsotg->regs + HPRT0);
		prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		/*
		 * LS/FS device on a HS root port: the interval given in
		 * frames is converted to microframes (x8), and splits are
		 * anchored to the last microframe of a frame (|= 0x7).
		 */
		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
		    (dev_speed == USB_SPEED_LOW ||
		     dev_speed == USB_SPEED_FULL)) {
			qh->host_interval *= 8;
			qh->next_active_frame |= 0x7;
			qh->start_split_frame = qh->next_active_frame;
			dwc2_sch_dbg(hsotg,
				     "QH=%p init*8 nxt=%04x, fn=%04x, int=%#x\n",
				     qh, qh->next_active_frame,
				     hsotg->frame_number, qh->host_interval);

		}
		dev_dbg(hsotg->dev, "interval=%d\n", qh->host_interval);
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
		 dwc2_hcd_get_ep_num(&urb->pipe_info),
		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	qh->dev_speed = dev_speed;

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);

	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
			 qh->host_us);
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
			 qh->host_interval);
	}
}
656
657/**
658 * dwc2_hcd_qh_create() - Allocates and initializes a QH
659 *
660 * @hsotg: The HCD state structure for the DWC OTG controller
661 * @urb: Holds the information about the device/endpoint needed
662 * to initialize the QH
663 * @atomic_alloc: Flag to do atomic allocation if needed
664 *
665 * Return: Pointer to the newly allocated QH, or NULL on error
666 */
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +0200667struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
Paul Zimmerman7359d482013-03-11 17:47:59 -0700668 struct dwc2_hcd_urb *urb,
669 gfp_t mem_flags)
670{
671 struct dwc2_qh *qh;
672
Paul Zimmermanb2d6cb52013-07-13 14:53:51 -0700673 if (!urb->priv)
674 return NULL;
675
Paul Zimmerman7359d482013-03-11 17:47:59 -0700676 /* Allocate memory */
677 qh = kzalloc(sizeof(*qh), mem_flags);
678 if (!qh)
679 return NULL;
680
681 dwc2_qh_init(hsotg, qh, urb);
682
683 if (hsotg->core_params->dma_desc_enable > 0 &&
684 dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
685 dwc2_hcd_qh_free(hsotg, qh);
686 return NULL;
687 }
688
689 return qh;
690}
691
692/**
693 * dwc2_hcd_qh_free() - Frees the QH
694 *
695 * @hsotg: HCD instance
696 * @qh: The QH to free
697 *
698 * QH should already be removed from the list. QTD list should already be empty
699 * if called from URB Dequeue.
700 *
701 * Must NOT be called with interrupt disabled or spinlock held
702 */
703void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
704{
Douglas Anderson17dd5b62016-01-28 18:19:59 -0800705 /* Make sure any unreserve work is finished. */
706 if (del_timer_sync(&qh->unreserve_timer)) {
707 unsigned long flags;
708
709 spin_lock_irqsave(&hsotg->lock, flags);
710 dwc2_do_unreserve(hsotg, qh);
711 spin_unlock_irqrestore(&hsotg->lock, flags);
712 }
713
Douglas Anderson3bc04e22016-01-28 18:19:53 -0800714 if (qh->desc_list)
Paul Zimmerman7359d482013-03-11 17:47:59 -0700715 dwc2_hcd_qh_free_ddma(hsotg, qh);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700716 kfree(qh);
717}
718
719/**
Paul Zimmerman7359d482013-03-11 17:47:59 -0700720 * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
721 * schedule if it is not already in the schedule. If the QH is already in
722 * the schedule, no action is taken.
723 *
724 * @hsotg: The HCD state structure for the DWC OTG controller
725 * @qh: The QH to add
726 *
727 * Return: 0 if successful, negative error code otherwise
728 */
729int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
730{
Dan Carpenterd31e6ca2013-11-25 17:11:29 +0300731 int status;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700732 u32 intr_mask;
733
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200734 if (dbg_qh(qh))
735 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700736
737 if (!list_empty(&qh->qh_list_entry))
738 /* QH already in a schedule */
Dan Carpenterd31e6ca2013-11-25 17:11:29 +0300739 return 0;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700740
Douglas Andersonced9eee2016-01-28 18:20:04 -0800741 if (!dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number) &&
Gregory Herrero08c4ffc2015-09-22 15:16:45 +0200742 !hsotg->frame_number) {
Douglas Anderson74fc4a72016-01-28 18:19:58 -0800743 u16 new_frame;
744
Gregory Herrero08c4ffc2015-09-22 15:16:45 +0200745 dev_dbg(hsotg->dev,
746 "reset frame number counter\n");
Douglas Anderson74fc4a72016-01-28 18:19:58 -0800747 new_frame = dwc2_frame_num_inc(hsotg->frame_number,
Gregory Herrero08c4ffc2015-09-22 15:16:45 +0200748 SCHEDULE_SLOP);
Douglas Anderson74fc4a72016-01-28 18:19:58 -0800749
Douglas Andersonced9eee2016-01-28 18:20:04 -0800750 dwc2_sch_vdbg(hsotg, "QH=%p reset nxt=%04x=>%04x\n",
751 qh, qh->next_active_frame, new_frame);
752 qh->next_active_frame = new_frame;
Gregory Herrero08c4ffc2015-09-22 15:16:45 +0200753 }
754
Paul Zimmerman7359d482013-03-11 17:47:59 -0700755 /* Add the new QH to the appropriate schedule */
756 if (dwc2_qh_is_non_per(qh)) {
757 /* Always start in inactive schedule */
758 list_add_tail(&qh->qh_list_entry,
759 &hsotg->non_periodic_sched_inactive);
Dan Carpenter5e128472013-11-25 17:14:14 +0300760 return 0;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700761 }
762
Dan Carpenter5e128472013-11-25 17:14:14 +0300763 status = dwc2_schedule_periodic(hsotg, qh);
764 if (status)
765 return status;
766 if (!hsotg->periodic_qh_count) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300767 intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
Dan Carpenter5e128472013-11-25 17:14:14 +0300768 intr_mask |= GINTSTS_SOF;
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300769 dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
Dan Carpenter5e128472013-11-25 17:14:14 +0300770 }
771 hsotg->periodic_qh_count++;
772
Dan Carpenterd31e6ca2013-11-25 17:11:29 +0300773 return 0;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700774}
775
776/**
777 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
778 * schedule. Memory is not freed.
779 *
780 * @hsotg: The HCD state structure
781 * @qh: QH to remove from schedule
782 */
783void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
784{
785 u32 intr_mask;
786
787 dev_vdbg(hsotg->dev, "%s()\n", __func__);
788
789 if (list_empty(&qh->qh_list_entry))
790 /* QH is not in a schedule */
791 return;
792
793 if (dwc2_qh_is_non_per(qh)) {
794 if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
795 hsotg->non_periodic_qh_ptr =
796 hsotg->non_periodic_qh_ptr->next;
797 list_del_init(&qh->qh_list_entry);
Dan Carpenter5e128472013-11-25 17:14:14 +0300798 return;
799 }
800
801 dwc2_deschedule_periodic(hsotg, qh);
802 hsotg->periodic_qh_count--;
803 if (!hsotg->periodic_qh_count) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300804 intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
Dan Carpenter5e128472013-11-25 17:14:14 +0300805 intr_mask &= ~GINTSTS_SOF;
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300806 dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700807 }
808}
809
/*
 * Schedule the next continuing periodic split transfer
 *
 * Updates qh->next_active_frame (and possibly qh->start_split_frame) for
 * either the complete-split phase (sched_next_periodic_split != 0) or the
 * next start split of a new period.
 */
static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh, u16 frame_number,
				      int sched_next_periodic_split)
{
	u16 incr;
	u16 old_frame = qh->next_active_frame;	/* kept only for debug print */

	if (sched_next_periodic_split) {
		/* Complete split: go out in the current frame by default */
		qh->next_active_frame = frame_number;
		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
		if (dwc2_frame_num_le(frame_number, incr)) {
			/*
			 * Allow one frame to elapse after start split
			 * microframe before scheduling complete split, but
			 * DON'T if we are doing the next start split in the
			 * same frame for an ISOC out
			 */
			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
			    qh->ep_is_in != 0) {
				qh->next_active_frame = dwc2_frame_num_inc(
					qh->next_active_frame, 1);
			}
		}
	} else {
		/*
		 * Next start split: one interval after the previous start
		 * split, clamped to "now", anchored to uframe 7 (|= 0x7).
		 */
		qh->next_active_frame =
			dwc2_frame_num_inc(qh->start_split_frame,
					   qh->host_interval);
		if (dwc2_frame_num_le(qh->next_active_frame, frame_number))
			qh->next_active_frame = frame_number;
		qh->next_active_frame |= 0x7;
		qh->start_split_frame = qh->next_active_frame;
	}

	dwc2_sch_vdbg(hsotg, "QH=%p next(%d) fn=%04x, nxt=%04x=>%04x (%+d)\n",
		      qh, sched_next_periodic_split, frame_number, old_frame,
		      qh->next_active_frame,
		      dwc2_frame_num_dec(qh->next_active_frame, old_frame));
}
851
/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_next_periodic_split)
{
	u16 frame_number;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (dwc2_qh_is_non_per(qh)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		if (!list_empty(&qh->qtd_list))
			/* Add back to inactive non-periodic schedule */
			dwc2_hcd_qh_add(hsotg, qh);
		return;
	}

	frame_number = dwc2_hcd_get_frame_number(hsotg);

	if (qh->do_split) {
		/* Split transactions have their own next-frame rules */
		dwc2_sched_periodic_split(hsotg, qh, frame_number,
					  sched_next_periodic_split);
	} else {
		/* Advance one interval, but never schedule in the past */
		qh->next_active_frame = dwc2_frame_num_inc(
			qh->next_active_frame, qh->host_interval);
		if (dwc2_frame_num_le(qh->next_active_frame, frame_number))
			qh->next_active_frame = frame_number;
	}

	if (list_empty(&qh->qtd_list)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		return;
	}
	/*
	 * Remove from periodic_sched_queued and move to
	 * appropriate queue
	 */
	if ((hsotg->core_params->uframe_sched > 0 &&
	     dwc2_frame_num_le(qh->next_active_frame, frame_number)) ||
	    (hsotg->core_params->uframe_sched <= 0 &&
	     qh->next_active_frame == frame_number))
		list_move_tail(&qh->qh_list_entry,
			       &hsotg->periodic_sched_ready);
	else
		list_move_tail(&qh->qh_list_entry,
			       &hsotg->periodic_sched_inactive);
}
911
912/**
913 * dwc2_hcd_qtd_init() - Initializes a QTD structure
914 *
915 * @qtd: The QTD to initialize
916 * @urb: The associated URB
917 */
918void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
919{
920 qtd->urb = urb;
921 if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
922 USB_ENDPOINT_XFER_CONTROL) {
923 /*
924 * The only time the QTD data toggle is used is on the data
925 * phase of control transfers. This phase always starts with
926 * DATA1.
927 */
928 qtd->data_toggle = DWC2_HC_PID_DATA1;
929 qtd->control_phase = DWC2_CONTROL_SETUP;
930 }
931
932 /* Start split */
933 qtd->complete_split = 0;
934 qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
935 qtd->isoc_split_offset = 0;
936 qtd->in_process = 0;
937
938 /* Store the qtd ptr in the urb to reference the QTD */
939 urb->qtd = qtd;
940}
941
942/**
943 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
Gregory Herrero33ad2612015-04-29 22:09:15 +0200944 * Caller must hold driver lock.
Paul Zimmerman7359d482013-03-11 17:47:59 -0700945 *
946 * @hsotg: The DWC HCD structure
947 * @qtd: The QTD to add
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +0200948 * @qh: Queue head to add qtd to
Paul Zimmerman7359d482013-03-11 17:47:59 -0700949 *
950 * Return: 0 if successful, negative error code otherwise
951 *
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +0200952 * If the QH to which the QTD is added is not currently scheduled, it is placed
953 * into the proper schedule based on its EP type.
Paul Zimmerman7359d482013-03-11 17:47:59 -0700954 */
955int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +0200956 struct dwc2_qh *qh)
Paul Zimmerman7359d482013-03-11 17:47:59 -0700957{
Paul Zimmermanb2d6cb52013-07-13 14:53:51 -0700958 int retval;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700959
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +0200960 if (unlikely(!qh)) {
961 dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
962 retval = -EINVAL;
963 goto fail;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700964 }
965
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +0200966 retval = dwc2_hcd_qh_add(hsotg, qh);
Paul Zimmermanb2d6cb52013-07-13 14:53:51 -0700967 if (retval)
968 goto fail;
969
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +0200970 qtd->qh = qh;
971 list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
Paul Zimmermanb2d6cb52013-07-13 14:53:51 -0700972
973 return 0;
Paul Zimmermanb2d6cb52013-07-13 14:53:51 -0700974fail:
Paul Zimmerman7359d482013-03-11 17:47:59 -0700975 return retval;
976}