blob: 76f3c6596befaf1fd5cf3cc1cbed8e35b9fe1e5e [file] [log] [blame]
Paul Zimmerman7359d482013-03-11 17:47:59 -07001/*
2 * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
3 *
4 * Copyright (C) 2004-2013 Synopsys, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The names of the above-listed copyright holders may not be used
16 * to endorse or promote products derived from this software without
17 * specific prior written permission.
18 *
19 * ALTERNATIVELY, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") as published by the Free Software
21 * Foundation; either version 2 of the License, or (at your option) any
22 * later version.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36
37/*
38 * This file contains the functions to manage Queue Heads and Queue
39 * Transfer Descriptors for Host mode
40 */
Douglas Andersonfb616e32016-01-28 18:20:08 -080041#include <linux/gcd.h>
Paul Zimmerman7359d482013-03-11 17:47:59 -070042#include <linux/kernel.h>
43#include <linux/module.h>
44#include <linux/spinlock.h>
45#include <linux/interrupt.h>
46#include <linux/dma-mapping.h>
47#include <linux/io.h>
48#include <linux/slab.h>
49#include <linux/usb.h>
50
51#include <linux/usb/hcd.h>
52#include <linux/usb/ch11.h>
53
54#include "core.h"
55#include "hcd.h"
56
Douglas Anderson17dd5b62016-01-28 18:19:59 -080057/* Wait this long before releasing periodic reservation */
58#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
59
60/**
Douglas Andersonb951c6c2016-01-28 18:20:05 -080061 * dwc2_periodic_channel_available() - Checks that a channel is available for a
62 * periodic transfer
63 *
64 * @hsotg: The HCD state structure for the DWC OTG controller
65 *
66 * Return: 0 if successful, negative error code otherwise
67 */
68static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
69{
70 /*
71 * Currently assuming that there is a dedicated host channel for
72 * each periodic transaction plus at least one host channel for
73 * non-periodic transactions
74 */
75 int status;
76 int num_channels;
77
78 num_channels = hsotg->core_params->host_channels;
79 if (hsotg->periodic_channels + hsotg->non_periodic_channels <
80 num_channels
81 && hsotg->periodic_channels < num_channels - 1) {
82 status = 0;
83 } else {
84 dev_dbg(hsotg->dev,
85 "%s: Total channels: %d, Periodic: %d, "
86 "Non-periodic: %d\n", __func__, num_channels,
87 hsotg->periodic_channels, hsotg->non_periodic_channels);
88 status = -ENOSPC;
89 }
90
91 return status;
92}
93
94/**
95 * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
96 * for the specified QH in the periodic schedule
97 *
98 * @hsotg: The HCD state structure for the DWC OTG controller
99 * @qh: QH containing periodic bandwidth required
100 *
101 * Return: 0 if successful, negative error code otherwise
102 *
103 * For simplicity, this calculation assumes that all the transfers in the
104 * periodic schedule may occur in the same (micro)frame
105 */
106static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
107 struct dwc2_qh *qh)
108{
109 int status;
110 s16 max_claimed_usecs;
111
112 status = 0;
113
114 if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
115 /*
116 * High speed mode
117 * Max periodic usecs is 80% x 125 usec = 100 usec
118 */
119 max_claimed_usecs = 100 - qh->host_us;
120 } else {
121 /*
122 * Full speed mode
123 * Max periodic usecs is 90% x 1000 usec = 900 usec
124 */
125 max_claimed_usecs = 900 - qh->host_us;
126 }
127
128 if (hsotg->periodic_usecs > max_claimed_usecs) {
129 dev_err(hsotg->dev,
130 "%s: already claimed usecs %d, required usecs %d\n",
131 __func__, hsotg->periodic_usecs, qh->host_us);
132 status = -ENOSPC;
133 }
134
135 return status;
136}
137
138/**
139 * Microframe scheduler
140 * track the total use in hsotg->frame_usecs
141 * keep each qh use in qh->frame_usecs
142 * when surrendering the qh then donate the time back
143 */
144static const unsigned short max_uframe_usecs[] = {
145 100, 100, 100, 100, 100, 100, 30, 0
146};
147
148void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
149{
150 int i;
151
152 for (i = 0; i < 8; i++)
153 hsotg->frame_usecs[i] = max_uframe_usecs[i];
154}
155
156static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
157{
158 unsigned short utime = qh->host_us;
159 int i;
160
161 for (i = 0; i < 8; i++) {
162 /* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
163 if (utime <= hsotg->frame_usecs[i]) {
164 hsotg->frame_usecs[i] -= utime;
165 qh->frame_usecs[i] += utime;
166 return i;
167 }
168 }
169 return -ENOSPC;
170}
171
/*
 * use this for FS apps that can span multiple uframes
 *
 * Tries to claim qh->host_us worth of time spread over a run of
 * consecutive microframes starting at index i.  On success the time is
 * deducted from hsotg->frame_usecs[] and credited to qh->frame_usecs[],
 * and the starting index i is returned; otherwise returns -ENOSPC.
 *
 * NOTE(review): the scan logic below is subtle and order-sensitive (the
 * inner loop both tests and later commits the claim); treat with care.
 */
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->host_us;	/* time still to place */
	unsigned short xtime;			/* accumulated time from i..j */
	int t_left;
	int i;
	int j;
	int k;

	for (i = 0; i < 8; i++) {
		/* Candidate start slot must have some time left */
		if (hsotg->frame_usecs[i] <= 0)
			continue;

		/*
		 * we need n consecutive slots so use j as a start slot
		 * j plus j+1 must be enough time (for now)
		 */
		xtime = hsotg->frame_usecs[i];
		for (j = i + 1; j < 8; j++) {
			/*
			 * if we add this frame remaining time to xtime we may
			 * be OK, if not we need to test j for a complete frame
			 */
			if (xtime + hsotg->frame_usecs[j] < utime) {
				if (hsotg->frame_usecs[j] <
						max_uframe_usecs[j])
					continue;
			}
			if (xtime >= utime) {
				/* Enough accumulated: commit the claim i..k */
				t_left = utime;
				for (k = i; k < 8; k++) {
					t_left -= hsotg->frame_usecs[k];
					if (t_left <= 0) {
						/* Partial claim of slot k */
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k]
								+ t_left;
						hsotg->frame_usecs[k] = -t_left;
						return i;
					} else {
						/* Consume slot k entirely */
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k];
						hsotg->frame_usecs[k] = 0;
					}
				}
			}
			/* add the frame time to x time */
			xtime += hsotg->frame_usecs[j];
			/* we must have a fully available next frame or break */
			if (xtime < utime &&
			    hsotg->frame_usecs[j] == max_uframe_usecs[j])
				continue;
		}
	}
	return -ENOSPC;
}
230
231static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
232{
233 int ret;
234
235 if (qh->dev_speed == USB_SPEED_HIGH) {
236 /* if this is a hs transaction we need a full frame */
237 ret = dwc2_find_single_uframe(hsotg, qh);
238 } else {
239 /*
240 * if this is a fs transaction we may need a sequence
241 * of frames
242 */
243 ret = dwc2_find_multi_uframe(hsotg, qh);
244 }
245 return ret;
246}
247
/**
 * dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled
 *
 * Takes a qh that has already been scheduled (which means we know we have the
 * bandwidth reserved for us) and set the next_active_frame and the
 * start_active_frame.
 *
 * This is expected to be called on qh's that weren't previously actively
 * running.  It just picks the next frame that we can fit into without any
 * thought about the past.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: QH for a periodic endpoint
 *
 */
static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u16 frame_number;
	u16 earliest_frame;
	u16 next_active_frame;
	u16 interval;

	/*
	 * Use the real frame number rather than the cached value as of the
	 * last SOF to give us a little extra slop.
	 */
	frame_number = dwc2_hcd_get_frame_number(hsotg);

	/*
	 * We wouldn't want to start any earlier than the next frame just in
	 * case the frame number ticks as we're doing this calculation.
	 *
	 * NOTE: if we could quantify how long till we actually get scheduled
	 * we might be able to avoid the "+ 1" by looking at the upper part of
	 * HFNUM (the FRREM field).  For now we'll just use the + 1 though.
	 */
	earliest_frame = dwc2_frame_num_inc(frame_number, 1);
	next_active_frame = earliest_frame;

	/* Get the "no microframe scheduler" out of the way... */
	if (hsotg->core_params->uframe_sched <= 0) {
		if (qh->do_split)
			/* Splits are active at microframe 0 minus 1 */
			next_active_frame |= 0x7;
		goto exit;
	}

	/* Adjust interval as per high speed schedule which has 8 uFrame */
	interval = gcd(qh->host_interval, 8);

	/*
	 * We know interval must divide (HFNUM_MAX_FRNUM + 1) now that we've
	 * done the gcd(), so it's safe to move to the beginning of the current
	 * interval like this.
	 *
	 * After this we might be before earliest_frame, but don't worry,
	 * we'll fix it...
	 */
	next_active_frame = (next_active_frame / interval) * interval;

	/*
	 * Actually choose to start at the frame number we've been
	 * scheduled for.
	 */
	next_active_frame = dwc2_frame_num_inc(next_active_frame,
					       qh->assigned_uframe);

	/*
	 * We actually need 1 frame before since the next_active_frame is
	 * the frame number we'll be put on the ready list and we won't be on
	 * the bus until 1 frame later.
	 */
	next_active_frame = dwc2_frame_num_dec(next_active_frame, 1);

	/*
	 * By now we might actually be before the earliest_frame.  Let's move
	 * up intervals until we're not.
	 */
	while (dwc2_frame_num_gt(earliest_frame, next_active_frame))
		next_active_frame = dwc2_frame_num_inc(next_active_frame,
						       interval);

exit:
	qh->next_active_frame = next_active_frame;
	qh->start_active_frame = next_active_frame;

	dwc2_sch_vdbg(hsotg, "QH=%p First fn=%04x nxt=%04x\n",
		      qh, frame_number, qh->next_active_frame);
}
337
/**
 * dwc2_do_reserve() - Make a periodic reservation
 *
 * Try to allocate space in the periodic schedule.  Depending on parameters
 * this might use the microframe scheduler or the dumb scheduler.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: QH for the periodic transfer.
 *
 * Returns: 0 upon success; error upon failure.
 */
static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	if (hsotg->core_params->uframe_sched > 0) {
		/* Microframe scheduler: claim (micro)frame time */
		status = dwc2_find_uframe(hsotg, qh);
		if (status >= 0)
			/* Success returns the chosen uframe index */
			qh->assigned_uframe = status;
	} else {
		/* Dumb scheduler: dedicated channel + simple bandwidth check */
		status = dwc2_periodic_channel_available(hsotg);
		if (status) {
			dev_info(hsotg->dev,
				 "%s: No host channel available for periodic transfer\n",
				 __func__);
			return status;
		}

		status = dwc2_check_periodic_bandwidth(hsotg, qh);
	}

	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Insufficient periodic bandwidth for periodic transfer\n",
			__func__);
		return status;
	}

	if (hsotg->core_params->uframe_sched <= 0)
		/* Reserve periodic channel */
		hsotg->periodic_channels++;

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs += qh->host_us;

	/* Decide which frame this newly reserved qh starts in */
	dwc2_pick_first_frame(hsotg, qh);

	return 0;
}
387
/**
 * dwc2_do_unreserve() - Actually release the periodic reservation
 *
 * This function actually releases the periodic bandwidth that was reserved
 * by the given qh.
 *
 * Must be called with hsotg->lock held (enforced by assert_spin_locked).
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: QH for the periodic transfer.
 */
static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	assert_spin_locked(&hsotg->lock);

	/* Callers should only get here with an unreserve outstanding */
	WARN_ON(!qh->unreserve_pending);

	/* No more unreserve pending--we're doing it */
	qh->unreserve_pending = false;

	/* A qh being unreserved should not still be on a schedule list */
	if (WARN_ON(!list_empty(&qh->qh_list_entry)))
		list_del_init(&qh->qh_list_entry);

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs -= qh->host_us;

	if (hsotg->core_params->uframe_sched > 0) {
		int i;

		/* Donate the qh's microframe time back to the global pool */
		for (i = 0; i < 8; i++) {
			hsotg->frame_usecs[i] += qh->frame_usecs[i];
			qh->frame_usecs[i] = 0;
		}
	} else {
		/* Release periodic channel reservation */
		hsotg->periodic_channels--;
	}
}
424
/**
 * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
 *
 * According to the kernel doc for usb_submit_urb() (specifically the part about
 * "Reserved Bandwidth Transfers"), we need to keep a reservation active as
 * long as a device driver keeps submitting.  Since we're using HCD_BH to give
 * back the URB we need to give the driver a little bit of time before we
 * release the reservation.  This timer function is called after the
 * appropriate delay.
 *
 * @data: The qh whose reservation to release, cast to unsigned long per the
 *        timer callback convention.
 */
static void dwc2_unreserve_timer_fn(unsigned long data)
{
	struct dwc2_qh *qh = (struct dwc2_qh *)data;
	struct dwc2_hsotg *hsotg = qh->hsotg;
	unsigned long flags;

	/*
	 * Wait for the lock, or for us to be scheduled again.  We
	 * could be scheduled again if:
	 * - We started executing but didn't get the lock yet.
	 * - A new reservation came in, but cancel didn't take effect
	 *   because we already started executing.
	 * - The timer has been kicked again.
	 * In that case cancel and wait for the next call.
	 */
	while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
		if (timer_pending(&qh->unreserve_timer))
			return;
	}

	/*
	 * Might be no more unreserve pending if:
	 * - We started executing but didn't get the lock yet.
	 * - A new reservation came in, but cancel didn't take effect
	 *   because we already started executing.
	 *
	 * We can't put this in the loop above because unreserve_pending needs
	 * to be accessed under lock, so we can only check it once we got the
	 * lock.
	 */
	if (qh->unreserve_pending)
		dwc2_do_unreserve(hsotg, qh);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}
472
Paul Zimmerman7359d482013-03-11 17:47:59 -0700473/**
Douglas Andersonb951c6c2016-01-28 18:20:05 -0800474 * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
475 * host channel is large enough to handle the maximum data transfer in a single
476 * (micro)frame for a periodic transfer
477 *
478 * @hsotg: The HCD state structure for the DWC OTG controller
479 * @qh: QH for a periodic endpoint
480 *
481 * Return: 0 if successful, negative error code otherwise
482 */
483static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
484 struct dwc2_qh *qh)
485{
486 u32 max_xfer_size;
487 u32 max_channel_xfer_size;
488 int status = 0;
489
490 max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
491 max_channel_xfer_size = hsotg->core_params->max_transfer_size;
492
493 if (max_xfer_size > max_channel_xfer_size) {
494 dev_err(hsotg->dev,
495 "%s: Periodic xfer length %d > max xfer length for channel %d\n",
496 __func__, max_xfer_size, max_channel_xfer_size);
497 status = -ENOSPC;
498 }
499
500 return status;
501}
502
/**
 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
 * the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: QH for the periodic transfer.  The QH should already contain the
 *      scheduling information.
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	/* Channel must be able to carry one (micro)frame's worth of data */
	status = dwc2_check_max_xfer_size(hsotg, qh);
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Channel max transfer size too small for periodic transfer\n",
			__func__);
		return status;
	}

	/* Cancel pending unreserve; if canceled OK, unreserve was pending */
	if (del_timer(&qh->unreserve_timer))
		WARN_ON(!qh->unreserve_pending);

	/*
	 * Only need to reserve if there's not an unreserve pending, since if an
	 * unreserve is pending then by definition our old reservation is still
	 * valid.  Unreserve might still be pending even if we didn't cancel if
	 * dwc2_unreserve_timer_fn() already started.  Code in the timer handles
	 * that case.
	 */
	if (!qh->unreserve_pending) {
		status = dwc2_do_reserve(hsotg, qh);
		if (status)
			return status;
	} else {
		/*
		 * It might have been a while, so make sure that frame_number
		 * is still good.  Note: we could also try to use the similar
		 * dwc2_next_periodic_start() but that schedules much more
		 * tightly and we might need to hurry and queue things up.
		 */
		if (dwc2_frame_num_le(qh->next_active_frame,
				      hsotg->frame_number))
			dwc2_pick_first_frame(hsotg, qh);
	}

	qh->unreserve_pending = 0;

	if (hsotg->core_params->dma_desc_enable > 0)
		/* Don't rely on SOF and start in ready schedule */
		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->periodic_sched_inactive);

	return 0;
}
564
/**
 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
 * from the periodic schedule
 *
 * The bandwidth reservation is NOT released here; it is released later by
 * the unreserve timer unless a new reservation comes in first.
 *
 * Must be called with hsotg->lock held (enforced by assert_spin_locked).
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: QH for the periodic transfer
 */
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
				     struct dwc2_qh *qh)
{
	bool did_modify;

	assert_spin_locked(&hsotg->lock);

	/*
	 * Schedule the unreserve to happen in a little bit.  Cases here:
	 * - Unreserve worker might be sitting there waiting to grab the lock.
	 *   In this case it will notice it's been schedule again and will
	 *   quit.
	 * - Unreserve worker might not be scheduled.
	 *
	 * We should never already be scheduled since dwc2_schedule_periodic()
	 * should have canceled the scheduled unreserve timer (hence the
	 * warning on did_modify).
	 *
	 * We add + 1 to the timer to guarantee that at least 1 jiffy has
	 * passed (otherwise if the jiffy counter might tick right after we
	 * read it and we'll get no delay).
	 */
	did_modify = mod_timer(&qh->unreserve_timer,
			       jiffies + DWC2_UNRESERVE_DELAY + 1);
	WARN_ON(did_modify);
	qh->unreserve_pending = 1;

	list_del_init(&qh->qh_list_entry);
}
601
/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 * @urb: Holds the information about the device/endpoint needed to initialize
 *       the QH
 */
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb)
{
	int dev_speed, hub_addr, hub_port;
	char *speed, *type;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Initialize QH */
	qh->hsotg = hsotg;
	setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
		    (unsigned long)qh);
	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);

	/* Low/full speed device behind a (non-root) hub needs splits */
	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
	    hub_addr != 0 && hub_addr != 1) {
		dev_vdbg(hsotg->dev,
			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
			 hub_port);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them */
		u32 hprt, prtspd;

		/* Todo: Account for split transfers in the bus time */
		int bytecount =
			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

		/* Splits go out on the high speed bus for timing purposes */
		qh->host_us = NS_TO_US(usb_calc_bus_time(qh->do_split ?
			      USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
			      qh->ep_type == USB_ENDPOINT_XFER_ISOC,
			      bytecount));

		qh->host_interval = urb->interval;
		dwc2_sch_dbg(hsotg, "QH=%p init nxt=%04x, fn=%04x, int=%#x\n",
			     qh, qh->next_active_frame, hsotg->frame_number,
			     qh->host_interval);
#if 0
		/* Increase interrupt polling rate for debugging */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
			qh->host_interval = 8;
#endif
		hprt = dwc2_readl(hsotg->regs + HPRT0);
		prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		/* LS/FS interval (in frames) -> microframes on a HS bus */
		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
		    (dev_speed == USB_SPEED_LOW ||
		     dev_speed == USB_SPEED_FULL)) {
			qh->host_interval *= 8;
			dwc2_sch_dbg(hsotg,
				     "QH=%p init*8 nxt=%04x, fn=%04x, int=%#x\n",
				     qh, qh->next_active_frame,
				     hsotg->frame_number, qh->host_interval);

		}
		dev_dbg(hsotg->dev, "interval=%d\n", qh->host_interval);
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
		 dwc2_hcd_get_ep_num(&urb->pipe_info),
		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	qh->dev_speed = dev_speed;

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);

	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
			 qh->host_us);
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
			 qh->host_interval);
	}
}
735
736/**
737 * dwc2_hcd_qh_create() - Allocates and initializes a QH
738 *
739 * @hsotg: The HCD state structure for the DWC OTG controller
740 * @urb: Holds the information about the device/endpoint needed
741 * to initialize the QH
742 * @atomic_alloc: Flag to do atomic allocation if needed
743 *
744 * Return: Pointer to the newly allocated QH, or NULL on error
745 */
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +0200746struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
Paul Zimmerman7359d482013-03-11 17:47:59 -0700747 struct dwc2_hcd_urb *urb,
748 gfp_t mem_flags)
749{
750 struct dwc2_qh *qh;
751
Paul Zimmermanb2d6cb52013-07-13 14:53:51 -0700752 if (!urb->priv)
753 return NULL;
754
Paul Zimmerman7359d482013-03-11 17:47:59 -0700755 /* Allocate memory */
756 qh = kzalloc(sizeof(*qh), mem_flags);
757 if (!qh)
758 return NULL;
759
760 dwc2_qh_init(hsotg, qh, urb);
761
762 if (hsotg->core_params->dma_desc_enable > 0 &&
763 dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
764 dwc2_hcd_qh_free(hsotg, qh);
765 return NULL;
766 }
767
768 return qh;
769}
770
771/**
772 * dwc2_hcd_qh_free() - Frees the QH
773 *
774 * @hsotg: HCD instance
775 * @qh: The QH to free
776 *
777 * QH should already be removed from the list. QTD list should already be empty
778 * if called from URB Dequeue.
779 *
780 * Must NOT be called with interrupt disabled or spinlock held
781 */
782void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
783{
Douglas Anderson17dd5b62016-01-28 18:19:59 -0800784 /* Make sure any unreserve work is finished. */
785 if (del_timer_sync(&qh->unreserve_timer)) {
786 unsigned long flags;
787
788 spin_lock_irqsave(&hsotg->lock, flags);
789 dwc2_do_unreserve(hsotg, qh);
790 spin_unlock_irqrestore(&hsotg->lock, flags);
791 }
792
Douglas Anderson3bc04e22016-01-28 18:19:53 -0800793 if (qh->desc_list)
Paul Zimmerman7359d482013-03-11 17:47:59 -0700794 dwc2_hcd_qh_free_ddma(hsotg, qh);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700795 kfree(qh);
796}
797
798/**
Paul Zimmerman7359d482013-03-11 17:47:59 -0700799 * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
800 * schedule if it is not already in the schedule. If the QH is already in
801 * the schedule, no action is taken.
802 *
803 * @hsotg: The HCD state structure for the DWC OTG controller
804 * @qh: The QH to add
805 *
806 * Return: 0 if successful, negative error code otherwise
807 */
808int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
809{
Dan Carpenterd31e6ca2013-11-25 17:11:29 +0300810 int status;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700811 u32 intr_mask;
812
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200813 if (dbg_qh(qh))
814 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700815
816 if (!list_empty(&qh->qh_list_entry))
817 /* QH already in a schedule */
Dan Carpenterd31e6ca2013-11-25 17:11:29 +0300818 return 0;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700819
820 /* Add the new QH to the appropriate schedule */
821 if (dwc2_qh_is_non_per(qh)) {
Douglas Andersonfb616e32016-01-28 18:20:08 -0800822 /* Schedule right away */
823 qh->start_active_frame = hsotg->frame_number;
824 qh->next_active_frame = qh->start_active_frame;
825
Paul Zimmerman7359d482013-03-11 17:47:59 -0700826 /* Always start in inactive schedule */
827 list_add_tail(&qh->qh_list_entry,
828 &hsotg->non_periodic_sched_inactive);
Dan Carpenter5e128472013-11-25 17:14:14 +0300829 return 0;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700830 }
831
Dan Carpenter5e128472013-11-25 17:14:14 +0300832 status = dwc2_schedule_periodic(hsotg, qh);
833 if (status)
834 return status;
835 if (!hsotg->periodic_qh_count) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300836 intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
Dan Carpenter5e128472013-11-25 17:14:14 +0300837 intr_mask |= GINTSTS_SOF;
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300838 dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
Dan Carpenter5e128472013-11-25 17:14:14 +0300839 }
840 hsotg->periodic_qh_count++;
841
Dan Carpenterd31e6ca2013-11-25 17:11:29 +0300842 return 0;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700843}
844
845/**
846 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
847 * schedule. Memory is not freed.
848 *
849 * @hsotg: The HCD state structure
850 * @qh: QH to remove from schedule
851 */
852void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
853{
854 u32 intr_mask;
855
856 dev_vdbg(hsotg->dev, "%s()\n", __func__);
857
858 if (list_empty(&qh->qh_list_entry))
859 /* QH is not in a schedule */
860 return;
861
862 if (dwc2_qh_is_non_per(qh)) {
863 if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
864 hsotg->non_periodic_qh_ptr =
865 hsotg->non_periodic_qh_ptr->next;
866 list_del_init(&qh->qh_list_entry);
Dan Carpenter5e128472013-11-25 17:14:14 +0300867 return;
868 }
869
870 dwc2_deschedule_periodic(hsotg, qh);
871 hsotg->periodic_qh_count--;
872 if (!hsotg->periodic_qh_count) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300873 intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
Dan Carpenter5e128472013-11-25 17:14:14 +0300874 intr_mask &= ~GINTSTS_SOF;
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300875 dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700876 }
877}
878
/**
 * dwc2_next_for_periodic_split() - Set next_active_frame midway thru a split.
 *
 * This is called for setting next_active_frame for periodic splits for all but
 * the first packet of the split.  Confusing?  I thought so...
 *
 * Periodic splits are single low/full speed transfers that we end up splitting
 * up into several high speed transfers.  They always fit into one full (1 ms)
 * frame but might be split over several microframes (125 us each).  We to put
 * each of the parts on a very specific high speed frame.
 *
 * This function figures out where the next active uFrame needs to be.
 *
 * @hsotg: The HCD state structure
 * @qh: QH for the periodic transfer.
 * @frame_number: The current frame number.
 *
 * Return: number missed by (or 0 if we didn't miss).
 */
static int dwc2_next_for_periodic_split(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh, u16 frame_number)
{
	u16 old_frame = qh->next_active_frame;
	u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
	int missed = 0;
	u16 incr;

	/*
	 * Basically: increment 1 normally, but 2 right after the start split
	 * (except for ISOC out).
	 */
	if (old_frame == qh->start_active_frame &&
	    !(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in))
		incr = 2;
	else
		incr = 1;

	qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr);

	/*
	 * Note that it's OK for frame_number to be 1 frame past
	 * next_active_frame.  Remember that next_active_frame is supposed to
	 * be 1 frame _before_ when we want to be scheduled.  If we're 1 frame
	 * past it just means schedule ASAP.
	 *
	 * It's _not_ OK, however, if we're more than one frame past.
	 */
	if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) {
		/*
		 * OOPS, we missed.  That's actually pretty bad since
		 * the hub will be unhappy; try ASAP I guess.
		 */
		missed = dwc2_frame_num_dec(prev_frame_number,
					    qh->next_active_frame);
		qh->next_active_frame = frame_number;
	}

	return missed;
}
938
939/**
940 * dwc2_next_periodic_start() - Set next_active_frame for next transfer start
941 *
942 * This is called for setting next_active_frame for a periodic transfer for
943 * all cases other than midway through a periodic split. This will also update
944 * start_active_frame.
945 *
946 * Since we _always_ keep start_active_frame as the start of the previous
947 * transfer this is normally pretty easy: we just add our interval to
948 * start_active_frame and we've got our answer.
949 *
950 * The tricks come into play if we miss. In that case we'll look for the next
951 * slot we can fit into.
952 *
953 * @hsotg: The HCD state structure
954 * @qh: QH for the periodic transfer.
955 * @frame_number: The current frame number.
956 *
957 * Return: number missed by (or 0 if we didn't miss).
958 */
959static int dwc2_next_periodic_start(struct dwc2_hsotg *hsotg,
960 struct dwc2_qh *qh, u16 frame_number)
961{
962 int missed = 0;
963 u16 interval = qh->host_interval;
964 u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
965
966 qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame,
967 interval);
968
969 /*
970 * The dwc2_frame_num_gt() function used below won't work terribly well
971 * with if we just incremented by a really large intervals since the
972 * frame counter only goes to 0x3fff. It's terribly unlikely that we
973 * will have missed in this case anyway. Just go to exit. If we want
974 * to try to do better we'll need to keep track of a bigger counter
975 * somewhere in the driver and handle overflows.
976 */
977 if (interval >= 0x1000)
978 goto exit;
979
980 /*
981 * Test for misses, which is when it's too late to schedule.
982 *
983 * A few things to note:
984 * - We compare against prev_frame_number since start_active_frame
985 * and next_active_frame are always 1 frame before we want things
986 * to be active and we assume we can still get scheduled in the
987 * current frame number.
Douglas Anderson9cf1a602016-01-28 18:20:11 -0800988 * - It's possible for start_active_frame (now incremented) to be
989 * next_active_frame if we got an EO MISS (even_odd miss) which
990 * basically means that we detected there wasn't enough time for
991 * the last packet and dwc2_hc_set_even_odd_frame() rescheduled us
992 * at the last second. We want to make sure we don't schedule
993 * another transfer for the same frame. My test webcam doesn't seem
994 * terribly upset by missing a transfer but really doesn't like when
995 * we do two transfers in the same frame.
Douglas Andersonfb616e32016-01-28 18:20:08 -0800996 * - Some misses are expected. Specifically, in order to work
997 * perfectly dwc2 really needs quite spectacular interrupt latency
998 * requirements. It needs to be able to handle its interrupts
999 * completely within 125 us of them being asserted. That not only
1000 * means that the dwc2 interrupt handler needs to be fast but it
1001 * means that nothing else in the system has to block dwc2 for a long
1002 * time. We can help with the dwc2 parts of this, but it's hard to
1003 * guarantee that a system will have interrupt latency < 125 us, so
1004 * we have to be robust to some misses.
1005 */
Douglas Anderson9cf1a602016-01-28 18:20:11 -08001006 if (qh->start_active_frame == qh->next_active_frame ||
1007 dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) {
Douglas Andersonfb616e32016-01-28 18:20:08 -08001008 u16 ideal_start = qh->start_active_frame;
1009
1010 /* Adjust interval as per gcd with plan length. */
1011 interval = gcd(interval, 8);
1012
1013 do {
1014 qh->start_active_frame = dwc2_frame_num_inc(
1015 qh->start_active_frame, interval);
1016 } while (dwc2_frame_num_gt(prev_frame_number,
1017 qh->start_active_frame));
1018
1019 missed = dwc2_frame_num_dec(qh->start_active_frame,
1020 ideal_start);
1021 }
1022
1023exit:
1024 qh->next_active_frame = qh->start_active_frame;
1025
1026 return missed;
Paul Zimmerman7359d482013-03-11 17:47:59 -07001027}
1028
1029/*
1030 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
1031 * non-periodic schedule. The QH is added to the inactive non-periodic
1032 * schedule if any QTDs are still attached to the QH.
1033 *
1034 * For periodic QHs, the QH is removed from the periodic queued schedule. If
1035 * there are any QTDs still attached to the QH, the QH is added to either the
1036 * periodic inactive schedule or the periodic ready schedule and its next
1037 * scheduled frame is calculated. The QH is placed in the ready schedule if
1038 * the scheduled frame has been reached already. Otherwise it's placed in the
1039 * inactive schedule. If there are no QTDs attached to the QH, the QH is
1040 * completely removed from the periodic schedule.
1041 */
1042void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
1043 int sched_next_periodic_split)
1044{
Douglas Andersonfb616e32016-01-28 18:20:08 -08001045 u16 old_frame = qh->next_active_frame;
Dan Carpenter5e128472013-11-25 17:14:14 +03001046 u16 frame_number;
Douglas Andersonfb616e32016-01-28 18:20:08 -08001047 int missed;
Dan Carpenter5e128472013-11-25 17:14:14 +03001048
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001049 if (dbg_qh(qh))
1050 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001051
1052 if (dwc2_qh_is_non_per(qh)) {
1053 dwc2_hcd_qh_unlink(hsotg, qh);
1054 if (!list_empty(&qh->qtd_list))
1055 /* Add back to inactive non-periodic schedule */
1056 dwc2_hcd_qh_add(hsotg, qh);
Dan Carpenter5e128472013-11-25 17:14:14 +03001057 return;
Paul Zimmerman7359d482013-03-11 17:47:59 -07001058 }
Dan Carpenter5e128472013-11-25 17:14:14 +03001059
Douglas Andersonfb616e32016-01-28 18:20:08 -08001060 /*
1061 * Use the real frame number rather than the cached value as of the
1062 * last SOF just to get us a little closer to reality. Note that
1063 * means we don't actually know if we've already handled the SOF
1064 * interrupt for this frame.
1065 */
Dan Carpenter5e128472013-11-25 17:14:14 +03001066 frame_number = dwc2_hcd_get_frame_number(hsotg);
1067
Douglas Andersonfb616e32016-01-28 18:20:08 -08001068 if (sched_next_periodic_split)
1069 missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number);
1070 else
1071 missed = dwc2_next_periodic_start(hsotg, qh, frame_number);
1072
1073 dwc2_sch_vdbg(hsotg,
1074 "QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n",
1075 qh, sched_next_periodic_split, frame_number, old_frame,
1076 qh->next_active_frame,
1077 dwc2_frame_num_dec(qh->next_active_frame, old_frame),
1078 missed, missed ? "MISS" : "");
Dan Carpenter5e128472013-11-25 17:14:14 +03001079
1080 if (list_empty(&qh->qtd_list)) {
1081 dwc2_hcd_qh_unlink(hsotg, qh);
1082 return;
1083 }
Douglas Andersonfb616e32016-01-28 18:20:08 -08001084
Dan Carpenter5e128472013-11-25 17:14:14 +03001085 /*
1086 * Remove from periodic_sched_queued and move to
1087 * appropriate queue
Douglas Andersonfb616e32016-01-28 18:20:08 -08001088 *
1089 * Note: we purposely use the frame_number from the "hsotg" structure
1090 * since we know SOF interrupt will handle future frames.
Dan Carpenter5e128472013-11-25 17:14:14 +03001091 */
Douglas Andersonfb616e32016-01-28 18:20:08 -08001092 if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number))
Douglas Anderson94ef7ae2016-01-28 18:19:56 -08001093 list_move_tail(&qh->qh_list_entry,
1094 &hsotg->periodic_sched_ready);
Dan Carpenter5e128472013-11-25 17:14:14 +03001095 else
Douglas Anderson94ef7ae2016-01-28 18:19:56 -08001096 list_move_tail(&qh->qh_list_entry,
1097 &hsotg->periodic_sched_inactive);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001098}
1099
1100/**
1101 * dwc2_hcd_qtd_init() - Initializes a QTD structure
1102 *
1103 * @qtd: The QTD to initialize
1104 * @urb: The associated URB
1105 */
1106void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
1107{
1108 qtd->urb = urb;
1109 if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
1110 USB_ENDPOINT_XFER_CONTROL) {
1111 /*
1112 * The only time the QTD data toggle is used is on the data
1113 * phase of control transfers. This phase always starts with
1114 * DATA1.
1115 */
1116 qtd->data_toggle = DWC2_HC_PID_DATA1;
1117 qtd->control_phase = DWC2_CONTROL_SETUP;
1118 }
1119
1120 /* Start split */
1121 qtd->complete_split = 0;
1122 qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
1123 qtd->isoc_split_offset = 0;
1124 qtd->in_process = 0;
1125
1126 /* Store the qtd ptr in the urb to reference the QTD */
1127 urb->qtd = qtd;
1128}
1129
1130/**
1131 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
Gregory Herrero33ad2612015-04-29 22:09:15 +02001132 * Caller must hold driver lock.
Paul Zimmerman7359d482013-03-11 17:47:59 -07001133 *
1134 * @hsotg: The DWC HCD structure
1135 * @qtd: The QTD to add
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +02001136 * @qh: Queue head to add qtd to
Paul Zimmerman7359d482013-03-11 17:47:59 -07001137 *
1138 * Return: 0 if successful, negative error code otherwise
1139 *
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +02001140 * If the QH to which the QTD is added is not currently scheduled, it is placed
1141 * into the proper schedule based on its EP type.
Paul Zimmerman7359d482013-03-11 17:47:59 -07001142 */
1143int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +02001144 struct dwc2_qh *qh)
Paul Zimmerman7359d482013-03-11 17:47:59 -07001145{
Paul Zimmermanb2d6cb52013-07-13 14:53:51 -07001146 int retval;
Paul Zimmerman7359d482013-03-11 17:47:59 -07001147
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +02001148 if (unlikely(!qh)) {
1149 dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
1150 retval = -EINVAL;
1151 goto fail;
Paul Zimmerman7359d482013-03-11 17:47:59 -07001152 }
1153
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +02001154 retval = dwc2_hcd_qh_add(hsotg, qh);
Paul Zimmermanb2d6cb52013-07-13 14:53:51 -07001155 if (retval)
1156 goto fail;
1157
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +02001158 qtd->qh = qh;
1159 list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
Paul Zimmermanb2d6cb52013-07-13 14:53:51 -07001160
1161 return 0;
Paul Zimmermanb2d6cb52013-07-13 14:53:51 -07001162fail:
Paul Zimmerman7359d482013-03-11 17:47:59 -07001163 return retval;
1164}