/*
 * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the functions to manage Queue Heads and Queue
 * Transfer Descriptors for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

/* Wait this long before releasing periodic reservation */
#define DWC2_UNRESERVE_DELAY	(msecs_to_jiffies(5))
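/*
 * Note: msecs_to_jiffies() rounds up, so at HZ=100 the 5 ms above becomes one
 * jiffy (10 ms of real time); dwc2_deschedule_periodic() also adds one extra
 * jiffy when arming the timer so that at least the full delay elapses.
 */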

/**
 * dwc2_do_unreserve() - Actually release the periodic reservation
 *
 * This function actually releases the periodic bandwidth that was reserved
 * by the given qh.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer.
 */
static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	assert_spin_locked(&hsotg->lock);

	WARN_ON(!qh->unreserve_pending);

	/* No more unreserve pending--we're doing it */
	qh->unreserve_pending = false;

	if (WARN_ON(!list_empty(&qh->qh_list_entry)))
		list_del_init(&qh->qh_list_entry);

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs -= qh->host_us;

	if (hsotg->core_params->uframe_sched > 0) {
		int i;

		for (i = 0; i < 8; i++) {
			hsotg->frame_usecs[i] += qh->frame_usecs[i];
			qh->frame_usecs[i] = 0;
		}
	} else {
		/* Release periodic channel reservation */
		hsotg->periodic_channels--;
	}
}
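
/*
 * Reservation lifecycle in this file: dwc2_schedule_periodic() claims the
 * bandwidth, dwc2_deschedule_periodic() only arms unreserve_timer, and the
 * reservation is actually dropped here, either from the timer path
 * (dwc2_unreserve_timer_fn()) or synchronously from dwc2_hcd_qh_free().
 */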

/**
 * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
 *
 * According to the kernel doc for usb_submit_urb() (specifically the part about
 * "Reserved Bandwidth Transfers"), we need to keep a reservation active as
 * long as a device driver keeps submitting. Since we're using HCD_BH to give
 * back the URB we need to give the driver a little bit of time before we
 * release the reservation. This timer function is called after the
 * appropriate delay.
 *
 * @data: The QH to unreserve, cast to an unsigned long.
 */
static void dwc2_unreserve_timer_fn(unsigned long data)
{
	struct dwc2_qh *qh = (struct dwc2_qh *)data;
	struct dwc2_hsotg *hsotg = qh->hsotg;
	unsigned long flags;

	/*
	 * Wait for the lock, or for us to be scheduled again. We
	 * could be scheduled again if:
	 * - We started executing but didn't get the lock yet.
	 * - A new reservation came in, but cancel didn't take effect
	 *   because we already started executing.
	 * - The timer has been kicked again.
	 * In that case cancel and wait for the next call.
	 */
	while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
		if (timer_pending(&qh->unreserve_timer))
			return;
	}

	/*
	 * Might be no more unreserve pending if:
	 * - We started executing but didn't get the lock yet.
	 * - A new reservation came in, but cancel didn't take effect
	 *   because we already started executing.
	 *
	 * We can't put this in the loop above because unreserve_pending needs
	 * to be accessed under lock, so we can only check it once we got the
	 * lock.
	 */
	if (qh->unreserve_pending)
		dwc2_do_unreserve(hsotg, qh);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 * @urb:   Holds the information about the device/endpoint needed to initialize
 *         the QH
 */
#define SCHEDULE_SLOP 10
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb)
{
	int dev_speed, hub_addr, hub_port;
	char *speed, *type;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Initialize QH */
	qh->hsotg = hsotg;
	setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
		    (unsigned long)qh);
	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);

	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
	    hub_addr != 0 && hub_addr != 1) {
		dev_vdbg(hsotg->dev,
			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
			 hub_port);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them */
		u32 hprt, prtspd;

		/* Todo: Account for split transfers in the bus time */
		int bytecount =
			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
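		/*
		 * qh->maxp packs high-bandwidth info: bits 10:0 give the max
		 * packet size and bits 12:11 give (transactions - 1) per
		 * microframe, so (illustrative) maxp = 0x0c00 means 2
		 * transactions of 1024 bytes and a bytecount of 2048.
		 */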

		qh->host_us = NS_TO_US(usb_calc_bus_time(qh->do_split ?
				USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
				qh->ep_type == USB_ENDPOINT_XFER_ISOC,
				bytecount));

		/* Ensure frame_number corresponds to the reality */
		hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
		/* Start in a slightly future (micro)frame */
		qh->next_active_frame = dwc2_frame_num_inc(hsotg->frame_number,
							   SCHEDULE_SLOP);
		qh->host_interval = urb->interval;
		dwc2_sch_dbg(hsotg, "QH=%p init nxt=%04x, fn=%04x, int=%#x\n",
			     qh, qh->next_active_frame, hsotg->frame_number,
			     qh->host_interval);
#if 0
		/* Increase interrupt polling rate for debugging */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
			qh->host_interval = 8;
#endif
		hprt = dwc2_readl(hsotg->regs + HPRT0);
		prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
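		/*
		 * urb->interval for a low/full-speed device counts 1 ms
		 * frames, but behind a high-speed root port the transfer runs
		 * as split transactions and this scheduler works in 125 us
		 * microframes, hence the * 8 below (e.g. interval 2 becomes
		 * 16). The | 0x7 aligns the start split to microframe 7.
		 */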
		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
		    (dev_speed == USB_SPEED_LOW ||
		     dev_speed == USB_SPEED_FULL)) {
			qh->host_interval *= 8;
			qh->next_active_frame |= 0x7;
			qh->start_split_frame = qh->next_active_frame;
			dwc2_sch_dbg(hsotg,
				     "QH=%p init*8 nxt=%04x, fn=%04x, int=%#x\n",
				     qh, qh->next_active_frame,
				     hsotg->frame_number, qh->host_interval);
		}
		dev_dbg(hsotg->dev, "interval=%d\n", qh->host_interval);
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
		 dwc2_hcd_get_ep_num(&urb->pipe_info),
		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	qh->dev_speed = dev_speed;

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);

	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
			 qh->host_us);
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
			 qh->host_interval);
	}
}

/**
 * dwc2_hcd_qh_create() - Allocates and initializes a QH
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller
 * @urb:       Holds the information about the device/endpoint needed
 *             to initialize the QH
 * @mem_flags: Flags for allocating memory
 *
 * Return: Pointer to the newly allocated QH, or NULL on error
 */
struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
				   struct dwc2_hcd_urb *urb,
				   gfp_t mem_flags)
{
	struct dwc2_qh *qh;

	if (!urb->priv)
		return NULL;

	/* Allocate memory */
	qh = kzalloc(sizeof(*qh), mem_flags);
	if (!qh)
		return NULL;

	dwc2_qh_init(hsotg, qh, urb);

	if (hsotg->core_params->dma_desc_enable > 0 &&
	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
		dwc2_hcd_qh_free(hsotg, qh);
		return NULL;
	}

	return qh;
}

/**
 * dwc2_hcd_qh_free() - Frees the QH
 *
 * @hsotg: HCD instance
 * @qh:    The QH to free
 *
 * QH should already be removed from the list. QTD list should already be empty
 * if called from URB Dequeue.
 *
 * Must NOT be called with interrupt disabled or spinlock held.
 */
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Make sure any unreserve work is finished. */
	if (del_timer_sync(&qh->unreserve_timer)) {
		unsigned long flags;

		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_do_unreserve(hsotg, qh);
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	if (qh->desc_list)
		dwc2_hcd_qh_free_ddma(hsotg, qh);
	kfree(qh);
}

/**
 * dwc2_periodic_channel_available() - Checks that a channel is available for a
 * periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
{
	/*
	 * Currently assuming that there is a dedicated host channel for
	 * each periodic transaction plus at least one host channel for
	 * non-periodic transactions
	 */
	int status;
	int num_channels;

	num_channels = hsotg->core_params->host_channels;
	if (hsotg->periodic_channels + hsotg->non_periodic_channels <
	    num_channels &&
	    hsotg->periodic_channels < num_channels - 1) {
		status = 0;
	} else {
		dev_dbg(hsotg->dev,
			"%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
			__func__, num_channels, hsotg->periodic_channels,
			hsotg->non_periodic_channels);
		status = -ENOSPC;
	}

	return status;
}

/**
 * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
 * for the specified QH in the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH containing periodic bandwidth required
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For simplicity, this calculation assumes that all the transfers in the
 * periodic schedule may occur in the same (micro)frame
 */
static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
					 struct dwc2_qh *qh)
{
	int status;
	s16 max_claimed_usecs;

	status = 0;

	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
		/*
		 * High speed mode
		 * Max periodic usecs is 80% x 125 usec = 100 usec
		 */
		max_claimed_usecs = 100 - qh->host_us;
	} else {
		/*
		 * Full speed mode
		 * Max periodic usecs is 90% x 1000 usec = 900 usec
		 */
		max_claimed_usecs = 900 - qh->host_us;
	}

	if (hsotg->periodic_usecs > max_claimed_usecs) {
		dev_err(hsotg->dev,
			"%s: already claimed usecs %d, required usecs %d\n",
			__func__, hsotg->periodic_usecs, qh->host_us);
		status = -ENOSPC;
	}

	return status;
}

/*
 * Microframe scheduler:
 * - track the total usage in hsotg->frame_usecs
 * - keep each QH's usage in qh->frame_usecs
 * - when a QH surrenders its slots, donate the time back
 */
static const unsigned short max_uframe_usecs[] = {
	100, 100, 100, 100, 100, 100, 30, 0
};
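
/*
 * A sketch of where the numbers above come from: a microframe is 125 us and
 * the periodic budget is 80% of that, giving 100 us in microframes 0-5.
 * Microframes 6 and 7 are kept mostly free (30 and 0 us), leaving room for
 * periodic split transactions near the end of the full-speed frame.
 */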

void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
{
	int i;

	for (i = 0; i < 8; i++)
		hsotg->frame_usecs[i] = max_uframe_usecs[i];
}

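/*
 * Worked example (illustrative): on a fresh schedule a QH with
 * qh->host_us = 30 fits in microframe 0, leaving hsotg->frame_usecs[0] at 70;
 * a second identical QH would land in microframe 0 as well, since 70 us are
 * still free there.
 */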
static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->host_us;
	int i;

	for (i = 0; i < 8; i++) {
		/* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
		if (utime <= hsotg->frame_usecs[i]) {
			hsotg->frame_usecs[i] -= utime;
			qh->frame_usecs[i] += utime;
			return i;
		}
	}
	return -ENOSPC;
}

/*
 * Use this for FS transfers that can span multiple microframes.
 */
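/*
 * Worked example (illustrative): on a fresh schedule a QH with
 * qh->host_us = 250 takes all of microframes 0 and 1 (100 us each) plus 50 us
 * of microframe 2, leaving hsotg->frame_usecs = { 0, 0, 50, 100, 100, 100,
 * 30, 0 } and returning start index 0.
 */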
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->host_us;
	unsigned short xtime;
	int t_left;
	int i;
	int j;
	int k;

	for (i = 0; i < 8; i++) {
		if (hsotg->frame_usecs[i] <= 0)
			continue;

		/*
		 * We need a run of consecutive slots, so treat i as the
		 * start slot and accumulate the remaining time of the
		 * slots that follow it.
		 */
		xtime = hsotg->frame_usecs[i];
		for (j = i + 1; j < 8; j++) {
			/*
			 * If adding slot j's remaining time to xtime still
			 * isn't enough, only count slot j if the whole
			 * frame is free.
			 */
			if (xtime + hsotg->frame_usecs[j] < utime) {
				if (hsotg->frame_usecs[j] <
				    max_uframe_usecs[j])
					continue;
			}
			if (xtime >= utime) {
				t_left = utime;
				for (k = i; k < 8; k++) {
					t_left -= hsotg->frame_usecs[k];
					if (t_left <= 0) {
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k]
								+ t_left;
						hsotg->frame_usecs[k] = -t_left;
						return i;
					} else {
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k];
						hsotg->frame_usecs[k] = 0;
					}
				}
			}
			/* add the frame time to x time */
			xtime += hsotg->frame_usecs[j];
			/* to keep the run going, this frame must have been fully available */
			if (xtime < utime &&
			    hsotg->frame_usecs[j] == max_uframe_usecs[j])
				continue;
		}
	}
	return -ENOSPC;
}

static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int ret;

	if (qh->dev_speed == USB_SPEED_HIGH) {
		/* an hs transaction must fit in a single microframe */
		ret = dwc2_find_single_uframe(hsotg, qh);
	} else {
		/*
		 * a fs transaction may need a sequence of
		 * microframes
		 */
		ret = dwc2_find_multi_uframe(hsotg, qh);
	}
	return ret;
}

/**
 * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
 * host channel is large enough to handle the maximum data transfer in a single
 * (micro)frame for a periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for a periodic endpoint
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh)
{
	u32 max_xfer_size;
	u32 max_channel_xfer_size;
	int status = 0;

	max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
	max_channel_xfer_size = hsotg->core_params->max_transfer_size;

	if (max_xfer_size > max_channel_xfer_size) {
		dev_err(hsotg->dev,
			"%s: Periodic xfer length %d > max xfer length for channel %d\n",
			__func__, max_xfer_size, max_channel_xfer_size);
		status = -ENOSPC;
	}

	return status;
}

/**
 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
 * the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer. The QH should already contain the
 *         scheduling information.
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	status = dwc2_check_max_xfer_size(hsotg, qh);
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Channel max transfer size too small for periodic transfer\n",
			__func__);
		return status;
	}

	/* Cancel pending unreserve; if canceled OK, unreserve was pending */
	if (del_timer(&qh->unreserve_timer))
		WARN_ON(!qh->unreserve_pending);

	/*
	 * Only need to reserve if there's not an unreserve pending, since if an
	 * unreserve is pending then by definition our old reservation is still
	 * valid. Unreserve might still be pending even if we didn't cancel if
	 * dwc2_unreserve_timer_fn() already started. Code in the timer handles
	 * that case.
	 */
	if (!qh->unreserve_pending) {
		if (hsotg->core_params->uframe_sched > 0) {
			int frame = -1;

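			/*
			 * dwc2_find_uframe() returns the first allocated
			 * microframe index (0-7) on success or -ENOSPC. The
			 * mapping below sets next_active_frame to the
			 * microframe just before that slot, wrapping index 0
			 * around to 7.
			 */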
			status = dwc2_find_uframe(hsotg, qh);
			if (status == 0)
				frame = 7;
			else if (status > 0)
				frame = status - 1;

			/* Set the new frame up */
			if (frame >= 0) {
				qh->next_active_frame &= ~0x7;
				qh->next_active_frame |= (frame & 7);
				dwc2_sch_dbg(hsotg,
					     "QH=%p sched_p nxt=%04x, uf=%d\n",
					     qh, qh->next_active_frame, frame);
			}

			if (status > 0)
				status = 0;
		} else {
			status = dwc2_periodic_channel_available(hsotg);
			if (status) {
				dev_info(hsotg->dev,
					 "%s: No host channel available for periodic transfer\n",
					 __func__);
				return status;
			}

			status = dwc2_check_periodic_bandwidth(hsotg, qh);
		}

		if (status) {
			dev_dbg(hsotg->dev,
				"%s: Insufficient periodic bandwidth for periodic transfer\n",
				__func__);
			return status;
		}

		if (hsotg->core_params->uframe_sched <= 0)
			/* Reserve periodic channel */
			hsotg->periodic_channels++;

		/* Update claimed usecs per (micro)frame */
		hsotg->periodic_usecs += qh->host_us;
	}

	qh->unreserve_pending = 0;

	if (hsotg->core_params->dma_desc_enable > 0)
		/* Don't rely on SOF and start in ready schedule */
		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->periodic_sched_inactive);

	return status;
}

/**
 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
 * from the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer
 */
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
				     struct dwc2_qh *qh)
{
	bool did_modify;

	assert_spin_locked(&hsotg->lock);

	/*
	 * Schedule the unreserve to happen in a little bit. Cases here:
	 * - Unreserve worker might be sitting there waiting to grab the lock.
	 *   In this case it will notice it's been scheduled again and will
	 *   quit.
	 * - Unreserve worker might not be scheduled.
	 *
	 * We should never already be scheduled since dwc2_schedule_periodic()
	 * should have canceled the scheduled unreserve timer (hence the
	 * warning on did_modify).
	 *
	 * We add + 1 to the timer to guarantee that at least 1 jiffy has
	 * passed (otherwise the jiffy counter might tick right after we
	 * read it and we'd get no delay).
	 */
	did_modify = mod_timer(&qh->unreserve_timer,
			       jiffies + DWC2_UNRESERVE_DELAY + 1);
	WARN_ON(did_modify);
	qh->unreserve_pending = 1;

	list_del_init(&qh->qh_list_entry);
}

/**
 * dwc2_hcd_qh_add() - Adds a QH to either the non-periodic or periodic
 * schedule if it is not already in the schedule. If the QH is already in
 * the schedule, no action is taken.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to add
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;
	u32 intr_mask;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!list_empty(&qh->qh_list_entry))
		/* QH already in a schedule */
		return 0;

	if (!dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number) &&
	    !hsotg->frame_number) {
		u16 new_frame;

		dev_dbg(hsotg->dev, "reset frame number counter\n");
		new_frame = dwc2_frame_num_inc(hsotg->frame_number,
					       SCHEDULE_SLOP);

		dwc2_sch_vdbg(hsotg, "QH=%p reset nxt=%04x=>%04x\n",
			      qh, qh->next_active_frame, new_frame);
		qh->next_active_frame = new_frame;
	}

	/* Add the new QH to the appropriate schedule */
	if (dwc2_qh_is_non_per(qh)) {
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->non_periodic_sched_inactive);
		return 0;
	}

	status = dwc2_schedule_periodic(hsotg, qh);
	if (status)
		return status;
	if (!hsotg->periodic_qh_count) {
		intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
		intr_mask |= GINTSTS_SOF;
		dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
	}
	hsotg->periodic_qh_count++;

	return 0;
}

/**
 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
 * schedule. Memory is not freed.
 *
 * @hsotg: The HCD state structure
 * @qh:    QH to remove from schedule
 */
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u32 intr_mask;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (list_empty(&qh->qh_list_entry))
		/* QH is not in a schedule */
		return;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
			hsotg->non_periodic_qh_ptr =
					hsotg->non_periodic_qh_ptr->next;
		list_del_init(&qh->qh_list_entry);
		return;
	}

	dwc2_deschedule_periodic(hsotg, qh);
	hsotg->periodic_qh_count--;
	if (!hsotg->periodic_qh_count) {
		intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
		intr_mask &= ~GINTSTS_SOF;
		dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
	}
}

/*
 * Schedule the next continuing periodic split transfer
 */
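/*
 * Two cases, matching the branches below: when sched_next_periodic_split is
 * set, the complete split is scheduled for the current frame number, normally
 * advanced by one so at least one (micro)frame elapses after the start split.
 * Otherwise the next start split goes one qh->host_interval past
 * start_split_frame, never into the past, and is re-aligned to microframe 7
 * by the | 0x7.
 */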
static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh, u16 frame_number,
				      int sched_next_periodic_split)
{
	u16 incr;
	u16 old_frame = qh->next_active_frame;

	if (sched_next_periodic_split) {
		qh->next_active_frame = frame_number;
		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
		if (dwc2_frame_num_le(frame_number, incr)) {
			/*
			 * Allow one frame to elapse after start split
			 * microframe before scheduling complete split, but
			 * DON'T if we are doing the next start split in the
			 * same frame for an ISOC out
			 */
			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
			    qh->ep_is_in != 0) {
				qh->next_active_frame = dwc2_frame_num_inc(
					qh->next_active_frame, 1);
			}
		}
	} else {
		qh->next_active_frame =
			dwc2_frame_num_inc(qh->start_split_frame,
					   qh->host_interval);
		if (dwc2_frame_num_le(qh->next_active_frame, frame_number))
			qh->next_active_frame = frame_number;
		qh->next_active_frame |= 0x7;
		qh->start_split_frame = qh->next_active_frame;
	}

	dwc2_sch_vdbg(hsotg, "QH=%p next(%d) fn=%04x, nxt=%04x=>%04x (%+d)\n",
		      qh, sched_next_periodic_split, frame_number, old_frame,
		      qh->next_active_frame,
		      dwc2_frame_num_dec(qh->next_active_frame, old_frame));
}

/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_next_periodic_split)
{
	u16 frame_number;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (dwc2_qh_is_non_per(qh)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		if (!list_empty(&qh->qtd_list))
			/* Add back to inactive non-periodic schedule */
			dwc2_hcd_qh_add(hsotg, qh);
		return;
	}

	frame_number = dwc2_hcd_get_frame_number(hsotg);

	if (qh->do_split) {
		dwc2_sched_periodic_split(hsotg, qh, frame_number,
					  sched_next_periodic_split);
	} else {
		qh->next_active_frame = dwc2_frame_num_inc(
			qh->next_active_frame, qh->host_interval);
		if (dwc2_frame_num_le(qh->next_active_frame, frame_number))
			qh->next_active_frame = frame_number;
	}

	if (list_empty(&qh->qtd_list)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		return;
	}
	/*
	 * Remove from periodic_sched_queued and move to
	 * appropriate queue
	 */
	if ((hsotg->core_params->uframe_sched > 0 &&
	     dwc2_frame_num_le(qh->next_active_frame, frame_number)) ||
	    (hsotg->core_params->uframe_sched <= 0 &&
	     qh->next_active_frame == frame_number))
		list_move_tail(&qh->qh_list_entry,
			       &hsotg->periodic_sched_ready);
	else
		list_move_tail(&qh->qh_list_entry,
			       &hsotg->periodic_sched_inactive);
}

/**
 * dwc2_hcd_qtd_init() - Initializes a QTD structure
 *
 * @qtd: The QTD to initialize
 * @urb: The associated URB
 */
void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
	qtd->urb = urb;
	if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
			USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * The only time the QTD data toggle is used is on the data
		 * phase of control transfers. This phase always starts with
		 * DATA1.
		 */
		qtd->data_toggle = DWC2_HC_PID_DATA1;
		qtd->control_phase = DWC2_CONTROL_SETUP;
	}

	/* Start split */
	qtd->complete_split = 0;
	qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
	qtd->isoc_split_offset = 0;
	qtd->in_process = 0;

	/* Store the qtd ptr in the urb to reference the QTD */
	urb->qtd = qtd;
}

/**
 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
 * Caller must hold driver lock.
 *
 * @hsotg: The DWC HCD structure
 * @qtd:   The QTD to add
 * @qh:    Queue head to add qtd to
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * If the QH to which the QTD is added is not currently scheduled, it is placed
 * into the proper schedule based on its EP type.
 */
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
		     struct dwc2_qh *qh)
{
	int retval;

	if (unlikely(!qh)) {
		dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
		retval = -EINVAL;
		goto fail;
	}

	retval = dwc2_hcd_qh_add(hsotg, qh);
	if (retval)
		goto fail;

	qtd->qh = qh;
	list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);

	return 0;
fail:
	return retval;
}
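
/*
 * Typical call sequence (a sketch; the callers live in hcd.c): the URB
 * enqueue path creates a QTD with dwc2_hcd_qtd_init() and links it in with
 * dwc2_hcd_qtd_add(), which schedules the QH via dwc2_hcd_qh_add() on first
 * use. Completion and dequeue then go through dwc2_hcd_qh_deactivate(),
 * dwc2_hcd_qh_unlink() and finally dwc2_hcd_qh_free().
 */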