blob: bda0b21b850f695627efb5b1e70ae0e63de39638 [file] [log] [blame]
Paul Zimmerman7359d482013-03-11 17:47:59 -07001/*
2 * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
3 *
4 * Copyright (C) 2004-2013 Synopsys, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The names of the above-listed copyright holders may not be used
16 * to endorse or promote products derived from this software without
17 * specific prior written permission.
18 *
19 * ALTERNATIVELY, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") as published by the Free Software
21 * Foundation; either version 2 of the License, or (at your option) any
22 * later version.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36
37/*
38 * This file contains the interrupt handlers for Host mode
39 */
40#include <linux/kernel.h>
41#include <linux/module.h>
42#include <linux/spinlock.h>
43#include <linux/interrupt.h>
44#include <linux/dma-mapping.h>
45#include <linux/io.h>
46#include <linux/slab.h>
47#include <linux/usb.h>
48
49#include <linux/usb/hcd.h>
50#include <linux/usb/ch11.h>
51
52#include "core.h"
53#include "hcd.h"
54
55/* This function is for debug only */
56static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
57{
58#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
Paul Zimmerman7359d482013-03-11 17:47:59 -070059 u16 curr_frame_number = hsotg->frame_number;
60
61 if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
62 if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) !=
63 curr_frame_number) {
64 hsotg->frame_num_array[hsotg->frame_num_idx] =
65 curr_frame_number;
66 hsotg->last_frame_num_array[hsotg->frame_num_idx] =
67 hsotg->last_frame_num;
68 hsotg->frame_num_idx++;
69 }
70 } else if (!hsotg->dumped_frame_num_array) {
71 int i;
72
73 dev_info(hsotg->dev, "Frame Last Frame\n");
74 dev_info(hsotg->dev, "----- ----------\n");
75 for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
76 dev_info(hsotg->dev, "0x%04x 0x%04x\n",
77 hsotg->frame_num_array[i],
78 hsotg->last_frame_num_array[i]);
79 }
80 hsotg->dumped_frame_num_array = 1;
81 }
82 hsotg->last_frame_num = curr_frame_number;
83#endif
84}
85
86static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
87 struct dwc2_host_chan *chan,
88 struct dwc2_qtd *qtd)
89{
90 struct urb *usb_urb;
91
Paul Zimmerman399fdf92013-07-13 14:53:50 -070092 if (!chan->qh)
93 return;
94
95 if (chan->qh->dev_speed == USB_SPEED_HIGH)
96 return;
97
98 if (!qtd->urb)
Paul Zimmerman7359d482013-03-11 17:47:59 -070099 return;
100
101 usb_urb = qtd->urb->priv;
Paul Zimmerman399fdf92013-07-13 14:53:50 -0700102 if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
Paul Zimmerman7359d482013-03-11 17:47:59 -0700103 return;
104
Paul Zimmerman399fdf92013-07-13 14:53:50 -0700105 if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
Paul Zimmerman7359d482013-03-11 17:47:59 -0700106 chan->qh->tt_buffer_dirty = 1;
107 if (usb_hub_clear_tt_buffer(usb_urb))
108 /* Clear failed; let's hope things work anyway */
109 chan->qh->tt_buffer_dirty = 0;
110 }
111}
112
113/*
114 * Handles the start-of-frame interrupt in host mode. Non-periodic
115 * transactions may be queued to the DWC_otg controller for the current
116 * (micro)frame. Periodic transactions may be queued to the controller
117 * for the next (micro)frame.
118 */
119static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
120{
121 struct list_head *qh_entry;
122 struct dwc2_qh *qh;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700123 enum dwc2_transaction_type tr_type;
124
125#ifdef DEBUG_SOF
126 dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
127#endif
128
Matthijs Kooijman37e1dcc2013-04-29 19:40:23 +0000129 hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700130
131 dwc2_track_missed_sofs(hsotg);
132
133 /* Determine whether any periodic QHs should be executed */
134 qh_entry = hsotg->periodic_sched_inactive.next;
135 while (qh_entry != &hsotg->periodic_sched_inactive) {
136 qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
137 qh_entry = qh_entry->next;
138 if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number))
139 /*
140 * Move QH to the ready list to be executed next
141 * (micro)frame
142 */
143 list_move(&qh->qh_list_entry,
144 &hsotg->periodic_sched_ready);
145 }
146 tr_type = dwc2_hcd_select_transactions(hsotg);
147 if (tr_type != DWC2_TRANSACTION_NONE)
148 dwc2_hcd_queue_transactions(hsotg, tr_type);
149
150 /* Clear interrupt */
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300151 dwc2_writel(GINTSTS_SOF, hsotg->regs + GINTSTS);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700152}
153
154/*
155 * Handles the Rx FIFO Level Interrupt, which indicates that there is
156 * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
157 * memory if the DWC_otg controller is operating in Slave mode.
158 */
159static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
160{
161 u32 grxsts, chnum, bcnt, dpid, pktsts;
162 struct dwc2_host_chan *chan;
163
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200164 if (dbg_perio())
165 dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");
Paul Zimmerman7359d482013-03-11 17:47:59 -0700166
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300167 grxsts = dwc2_readl(hsotg->regs + GRXSTSP);
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +0200168 chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700169 chan = hsotg->hc_ptr_array[chnum];
170 if (!chan) {
171 dev_err(hsotg->dev, "Unable to get corresponding channel\n");
172 return;
173 }
174
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +0200175 bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
176 dpid = (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT;
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200177 pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700178
179 /* Packet Status */
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200180 if (dbg_perio()) {
181 dev_vdbg(hsotg->dev, " Ch num = %d\n", chnum);
182 dev_vdbg(hsotg->dev, " Count = %d\n", bcnt);
183 dev_vdbg(hsotg->dev, " DPID = %d, chan.dpid = %d\n", dpid,
184 chan->data_pid_start);
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200185 dev_vdbg(hsotg->dev, " PStatus = %d\n", pktsts);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200186 }
Paul Zimmerman7359d482013-03-11 17:47:59 -0700187
188 switch (pktsts) {
189 case GRXSTS_PKTSTS_HCHIN:
190 /* Read the data into the host buffer */
191 if (bcnt > 0) {
192 dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);
193
194 /* Update the HC fields for the next packet received */
195 chan->xfer_count += bcnt;
196 chan->xfer_buf += bcnt;
197 }
198 break;
199 case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
200 case GRXSTS_PKTSTS_DATATOGGLEERR:
201 case GRXSTS_PKTSTS_HCHHALTED:
202 /* Handled in interrupt, just ignore data */
203 break;
204 default:
205 dev_err(hsotg->dev,
206 "RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
207 break;
208 }
209}
210
211/*
212 * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
213 * data packets may be written to the FIFO for OUT transfers. More requests
214 * may be written to the non-periodic request queue for IN transfers. This
215 * interrupt is enabled only in Slave mode.
216 */
217static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
218{
219 dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
220 dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
221}
222
223/*
224 * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
225 * packets may be written to the FIFO for OUT transfers. More requests may be
226 * written to the periodic request queue for IN transfers. This interrupt is
227 * enabled only in Slave mode.
228 */
229static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
230{
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200231 if (dbg_perio())
232 dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
Paul Zimmerman7359d482013-03-11 17:47:59 -0700233 dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
234}
235
236static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
237 u32 *hprt0_modify)
238{
239 struct dwc2_core_params *params = hsotg->core_params;
240 int do_reset = 0;
241 u32 usbcfg;
242 u32 prtspd;
243 u32 hcfg;
Matthijs Kooijmanbcc5def2013-04-29 19:42:00 +0000244 u32 fslspclksel;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700245 u32 hfir;
246
247 dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
248
249 /* Every time when port enables calculate HFIR.FrInterval */
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300250 hfir = dwc2_readl(hsotg->regs + HFIR);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700251 hfir &= ~HFIR_FRINT_MASK;
252 hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
253 HFIR_FRINT_MASK;
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300254 dwc2_writel(hfir, hsotg->regs + HFIR);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700255
256 /* Check if we need to adjust the PHY clock speed for low power */
257 if (!params->host_support_fs_ls_low_power) {
258 /* Port has been enabled, set the reset change flag */
259 hsotg->flags.b.port_reset_change = 1;
260 return;
261 }
262
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300263 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200264 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700265
266 if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
267 /* Low power */
268 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
269 /* Set PHY low power clock select for FS/LS devices */
270 usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300271 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700272 do_reset = 1;
273 }
274
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300275 hcfg = dwc2_readl(hsotg->regs + HCFG);
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200276 fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
277 HCFG_FSLSPCLKSEL_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700278
279 if (prtspd == HPRT0_SPD_LOW_SPEED &&
280 params->host_ls_low_power_phy_clk ==
281 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
282 /* 6 MHZ */
283 dev_vdbg(hsotg->dev,
284 "FS_PHY programming HCFG to 6 MHz\n");
Matthijs Kooijmanbcc5def2013-04-29 19:42:00 +0000285 if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200286 fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700287 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200288 hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300289 dwc2_writel(hcfg, hsotg->regs + HCFG);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700290 do_reset = 1;
291 }
292 } else {
293 /* 48 MHZ */
294 dev_vdbg(hsotg->dev,
295 "FS_PHY programming HCFG to 48 MHz\n");
Matthijs Kooijmanbcc5def2013-04-29 19:42:00 +0000296 if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200297 fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700298 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200299 hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300300 dwc2_writel(hcfg, hsotg->regs + HCFG);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700301 do_reset = 1;
302 }
303 }
304 } else {
305 /* Not low power */
306 if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
307 usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300308 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700309 do_reset = 1;
310 }
311 }
312
313 if (do_reset) {
314 *hprt0_modify |= HPRT0_RST;
315 queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
316 msecs_to_jiffies(60));
317 } else {
318 /* Port has been enabled, set the reset change flag */
319 hsotg->flags.b.port_reset_change = 1;
320 }
321}
322
323/*
324 * There are multiple conditions that can cause a port interrupt. This function
325 * determines which interrupt conditions have occurred and handles them
326 * appropriately.
327 */
328static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
329{
330 u32 hprt0;
331 u32 hprt0_modify;
332
333 dev_vdbg(hsotg->dev, "--Port Interrupt--\n");
334
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300335 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700336 hprt0_modify = hprt0;
337
338 /*
339 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
340 * GINTSTS
341 */
342 hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
343 HPRT0_OVRCURRCHG);
344
345 /*
346 * Port Connect Detected
347 * Set flag and clear if detected
348 */
349 if (hprt0 & HPRT0_CONNDET) {
350 dev_vdbg(hsotg->dev,
351 "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
352 hprt0);
Gregory Herreroa7714c12015-04-29 22:09:14 +0200353 if (hsotg->lx_state != DWC2_L0)
354 usb_hcd_resume_root_hub(hsotg->priv);
355
Paul Zimmerman7359d482013-03-11 17:47:59 -0700356 hsotg->flags.b.port_connect_status_change = 1;
357 hsotg->flags.b.port_connect_status = 1;
358 hprt0_modify |= HPRT0_CONNDET;
359
360 /*
361 * The Hub driver asserts a reset when it sees port connect
362 * status change flag
363 */
364 }
365
366 /*
367 * Port Enable Changed
368 * Clear if detected - Set internal flag if disabled
369 */
370 if (hprt0 & HPRT0_ENACHG) {
371 dev_vdbg(hsotg->dev,
372 " --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
373 hprt0, !!(hprt0 & HPRT0_ENA));
374 hprt0_modify |= HPRT0_ENACHG;
375 if (hprt0 & HPRT0_ENA)
376 dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
377 else
378 hsotg->flags.b.port_enable_change = 1;
379 }
380
381 /* Overcurrent Change Interrupt */
382 if (hprt0 & HPRT0_OVRCURRCHG) {
383 dev_vdbg(hsotg->dev,
384 " --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
385 hprt0);
386 hsotg->flags.b.port_over_current_change = 1;
387 hprt0_modify |= HPRT0_OVRCURRCHG;
388 }
389
390 /* Clear Port Interrupts */
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300391 dwc2_writel(hprt0_modify, hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700392}
393
394/*
395 * Gets the actual length of a transfer after the transfer halts. halt_status
396 * holds the reason for the halt.
397 *
398 * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
399 * is set to 1 upon return if less than the requested number of bytes were
400 * transferred. short_read may also be NULL on entry, in which case it remains
401 * unchanged.
402 */
403static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
404 struct dwc2_host_chan *chan, int chnum,
405 struct dwc2_qtd *qtd,
406 enum dwc2_halt_status halt_status,
407 int *short_read)
408{
409 u32 hctsiz, count, length;
410
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300411 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
Paul Zimmerman7359d482013-03-11 17:47:59 -0700412
413 if (halt_status == DWC2_HC_XFER_COMPLETE) {
414 if (chan->ep_is_in) {
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +0200415 count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
416 TSIZ_XFERSIZE_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700417 length = chan->xfer_len - count;
418 if (short_read != NULL)
419 *short_read = (count != 0);
420 } else if (chan->qh->do_split) {
421 length = qtd->ssplit_out_xfer_count;
422 } else {
423 length = chan->xfer_len;
424 }
425 } else {
426 /*
427 * Must use the hctsiz.pktcnt field to determine how much data
428 * has been transferred. This field reflects the number of
429 * packets that have been transferred via the USB. This is
430 * always an integral number of packets if the transfer was
431 * halted before its normal completion. (Can't use the
432 * hctsiz.xfersize field because that reflects the number of
433 * bytes transferred via the AHB, not the USB).
434 */
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +0200435 count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700436 length = (chan->start_pkt_count - count) * chan->max_packet;
437 }
438
439 return length;
440}
441
442/**
443 * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
444 * Complete interrupt on the host channel. Updates the actual_length field
445 * of the URB based on the number of bytes transferred via the host channel.
446 * Sets the URB status if the data transfer is finished.
447 *
448 * Return: 1 if the data transfer specified by the URB is completely finished,
449 * 0 otherwise
450 */
451static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
452 struct dwc2_host_chan *chan, int chnum,
453 struct dwc2_hcd_urb *urb,
454 struct dwc2_qtd *qtd)
455{
456 u32 hctsiz;
457 int xfer_done = 0;
458 int short_read = 0;
459 int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
460 DWC2_HC_XFER_COMPLETE,
461 &short_read);
462
463 if (urb->actual_length + xfer_length > urb->length) {
464 dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
465 xfer_length = urb->length - urb->actual_length;
466 }
467
468 /* Non DWORD-aligned buffer case handling */
Gregory Herrerodb62b9a2015-04-29 22:09:16 +0200469 if (chan->align_buf && xfer_length) {
Paul Zimmerman0cf31f82013-07-13 14:53:52 -0700470 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
Gregory Herrerodb62b9a2015-04-29 22:09:16 +0200471 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
472 chan->qh->dw_align_buf_size,
473 chan->ep_is_in ?
474 DMA_FROM_DEVICE : DMA_TO_DEVICE);
475 if (chan->ep_is_in)
476 memcpy(urb->buf + urb->actual_length,
477 chan->qh->dw_align_buf, xfer_length);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700478 }
479
480 dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
481 urb->actual_length, xfer_length);
482 urb->actual_length += xfer_length;
483
484 if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
485 (urb->flags & URB_SEND_ZERO_PACKET) &&
486 urb->actual_length >= urb->length &&
487 !(urb->length % chan->max_packet)) {
488 xfer_done = 0;
489 } else if (short_read || urb->actual_length >= urb->length) {
490 xfer_done = 1;
491 urb->status = 0;
492 }
493
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300494 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
Paul Zimmerman7359d482013-03-11 17:47:59 -0700495 dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
496 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
497 dev_vdbg(hsotg->dev, " chan->xfer_len %d\n", chan->xfer_len);
498 dev_vdbg(hsotg->dev, " hctsiz.xfersize %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +0200499 (hctsiz & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700500 dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n", urb->length);
501 dev_vdbg(hsotg->dev, " urb->actual_length %d\n", urb->actual_length);
502 dev_vdbg(hsotg->dev, " short_read %d, xfer_done %d\n", short_read,
503 xfer_done);
504
505 return xfer_done;
506}
507
508/*
509 * Save the starting data toggle for the next transfer. The data toggle is
510 * saved in the QH for non-control transfers and it's saved in the QTD for
511 * control transfers.
512 */
513void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
514 struct dwc2_host_chan *chan, int chnum,
515 struct dwc2_qtd *qtd)
516{
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300517 u32 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200518 u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700519
520 if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
521 if (pid == TSIZ_SC_MC_PID_DATA0)
522 chan->qh->data_toggle = DWC2_HC_PID_DATA0;
523 else
524 chan->qh->data_toggle = DWC2_HC_PID_DATA1;
525 } else {
526 if (pid == TSIZ_SC_MC_PID_DATA0)
527 qtd->data_toggle = DWC2_HC_PID_DATA0;
528 else
529 qtd->data_toggle = DWC2_HC_PID_DATA1;
530 }
531}
532
533/**
534 * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
535 * the transfer is stopped for any reason. The fields of the current entry in
536 * the frame descriptor array are set based on the transfer state and the input
537 * halt_status. Completes the Isochronous URB if all the URB frames have been
538 * completed.
539 *
540 * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
541 * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
542 */
543static enum dwc2_halt_status dwc2_update_isoc_urb_state(
544 struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
545 int chnum, struct dwc2_qtd *qtd,
546 enum dwc2_halt_status halt_status)
547{
548 struct dwc2_hcd_iso_packet_desc *frame_desc;
549 struct dwc2_hcd_urb *urb = qtd->urb;
550
551 if (!urb)
552 return DWC2_HC_XFER_NO_HALT_STATUS;
553
554 frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
555
556 switch (halt_status) {
557 case DWC2_HC_XFER_COMPLETE:
558 frame_desc->status = 0;
559 frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
560 chan, chnum, qtd, halt_status, NULL);
561
562 /* Non DWORD-aligned buffer case handling */
Gregory Herrerodb62b9a2015-04-29 22:09:16 +0200563 if (chan->align_buf && frame_desc->actual_length) {
Paul Zimmerman0cf31f82013-07-13 14:53:52 -0700564 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
565 __func__);
Gregory Herrerodb62b9a2015-04-29 22:09:16 +0200566 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
567 chan->qh->dw_align_buf_size,
568 chan->ep_is_in ?
569 DMA_FROM_DEVICE : DMA_TO_DEVICE);
570 if (chan->ep_is_in)
571 memcpy(urb->buf + frame_desc->offset +
572 qtd->isoc_split_offset,
573 chan->qh->dw_align_buf,
574 frame_desc->actual_length);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700575 }
576 break;
577 case DWC2_HC_XFER_FRAME_OVERRUN:
578 urb->error_count++;
579 if (chan->ep_is_in)
580 frame_desc->status = -ENOSR;
581 else
582 frame_desc->status = -ECOMM;
583 frame_desc->actual_length = 0;
584 break;
585 case DWC2_HC_XFER_BABBLE_ERR:
586 urb->error_count++;
587 frame_desc->status = -EOVERFLOW;
588 /* Don't need to update actual_length in this case */
589 break;
590 case DWC2_HC_XFER_XACT_ERR:
591 urb->error_count++;
592 frame_desc->status = -EPROTO;
593 frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
594 chan, chnum, qtd, halt_status, NULL);
595
596 /* Non DWORD-aligned buffer case handling */
Gregory Herrerodb62b9a2015-04-29 22:09:16 +0200597 if (chan->align_buf && frame_desc->actual_length) {
Paul Zimmerman0cf31f82013-07-13 14:53:52 -0700598 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
599 __func__);
Gregory Herrerodb62b9a2015-04-29 22:09:16 +0200600 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
601 chan->qh->dw_align_buf_size,
602 chan->ep_is_in ?
603 DMA_FROM_DEVICE : DMA_TO_DEVICE);
604 if (chan->ep_is_in)
605 memcpy(urb->buf + frame_desc->offset +
606 qtd->isoc_split_offset,
607 chan->qh->dw_align_buf,
608 frame_desc->actual_length);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700609 }
610
611 /* Skip whole frame */
612 if (chan->qh->do_split &&
613 chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
614 hsotg->core_params->dma_enable > 0) {
615 qtd->complete_split = 0;
616 qtd->isoc_split_offset = 0;
617 }
618
619 break;
620 default:
621 dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
622 halt_status);
623 break;
624 }
625
626 if (++qtd->isoc_frame_index == urb->packet_count) {
627 /*
628 * urb->status is not used for isoc transfers. The individual
629 * frame_desc statuses are used instead.
630 */
Paul Zimmerman0d012b92013-07-13 14:53:48 -0700631 dwc2_host_complete(hsotg, qtd, 0);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700632 halt_status = DWC2_HC_XFER_URB_COMPLETE;
633 } else {
634 halt_status = DWC2_HC_XFER_COMPLETE;
635 }
636
637 return halt_status;
638}
639
640/*
641 * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
642 * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
643 * still linked to the QH, the QH is added to the end of the inactive
644 * non-periodic schedule. For periodic QHs, removes the QH from the periodic
645 * schedule if no more QTDs are linked to the QH.
646 */
647static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
648 int free_qtd)
649{
650 int continue_split = 0;
651 struct dwc2_qtd *qtd;
652
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200653 if (dbg_qh(qh))
654 dev_vdbg(hsotg->dev, " %s(%p,%p,%d)\n", __func__,
655 hsotg, qh, free_qtd);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700656
657 if (list_empty(&qh->qtd_list)) {
658 dev_dbg(hsotg->dev, "## QTD list empty ##\n");
659 goto no_qtd;
660 }
661
662 qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
663
664 if (qtd->complete_split)
665 continue_split = 1;
666 else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
667 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
668 continue_split = 1;
669
670 if (free_qtd) {
671 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
672 continue_split = 0;
673 }
674
675no_qtd:
676 if (qh->channel)
677 qh->channel->align_buf = 0;
678 qh->channel = NULL;
679 dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
680}
681
682/**
683 * dwc2_release_channel() - Releases a host channel for use by other transfers
684 *
685 * @hsotg: The HCD state structure
686 * @chan: The host channel to release
687 * @qtd: The QTD associated with the host channel. This QTD may be
688 * freed if the transfer is complete or an error has occurred.
689 * @halt_status: Reason the channel is being released. This status
690 * determines the actions taken by this function.
691 *
692 * Also attempts to select and queue more transactions since at least one host
693 * channel is available.
694 */
695static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
696 struct dwc2_host_chan *chan,
697 struct dwc2_qtd *qtd,
698 enum dwc2_halt_status halt_status)
699{
700 enum dwc2_transaction_type tr_type;
701 u32 haintmsk;
702 int free_qtd = 0;
703
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200704 if (dbg_hc(chan))
705 dev_vdbg(hsotg->dev, " %s: channel %d, halt_status %d\n",
706 __func__, chan->hc_num, halt_status);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700707
708 switch (halt_status) {
709 case DWC2_HC_XFER_URB_COMPLETE:
710 free_qtd = 1;
711 break;
712 case DWC2_HC_XFER_AHB_ERR:
713 case DWC2_HC_XFER_STALL:
714 case DWC2_HC_XFER_BABBLE_ERR:
715 free_qtd = 1;
716 break;
717 case DWC2_HC_XFER_XACT_ERR:
Matthijs Kooijman8509f2f2013-03-25 12:00:25 -0700718 if (qtd && qtd->error_count >= 3) {
Paul Zimmerman7359d482013-03-11 17:47:59 -0700719 dev_vdbg(hsotg->dev,
720 " Complete URB with transaction error\n");
721 free_qtd = 1;
Paul Zimmerman0d012b92013-07-13 14:53:48 -0700722 dwc2_host_complete(hsotg, qtd, -EPROTO);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700723 }
724 break;
725 case DWC2_HC_XFER_URB_DEQUEUE:
726 /*
727 * The QTD has already been removed and the QH has been
728 * deactivated. Don't want to do anything except release the
729 * host channel and try to queue more transfers.
730 */
731 goto cleanup;
732 case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
733 dev_vdbg(hsotg->dev, " Complete URB with I/O error\n");
734 free_qtd = 1;
Paul Zimmerman0d012b92013-07-13 14:53:48 -0700735 dwc2_host_complete(hsotg, qtd, -EIO);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700736 break;
737 case DWC2_HC_XFER_NO_HALT_STATUS:
738 default:
739 break;
740 }
741
742 dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);
743
744cleanup:
745 /*
746 * Release the host channel for use by other transfers. The cleanup
747 * function clears the channel interrupt enables and conditions, so
748 * there's no need to clear the Channel Halted interrupt separately.
749 */
750 if (!list_empty(&chan->hc_list_entry))
751 list_del(&chan->hc_list_entry);
752 dwc2_hc_cleanup(hsotg, chan);
753 list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
754
Dom Cobley20f2eb92013-09-23 14:23:34 -0700755 if (hsotg->core_params->uframe_sched > 0) {
756 hsotg->available_host_channels++;
757 } else {
758 switch (chan->ep_type) {
759 case USB_ENDPOINT_XFER_CONTROL:
760 case USB_ENDPOINT_XFER_BULK:
761 hsotg->non_periodic_channels--;
762 break;
763 default:
764 /*
765 * Don't release reservations for periodic channels
766 * here. That's done when a periodic transfer is
767 * descheduled (i.e. when the QH is removed from the
768 * periodic schedule).
769 */
770 break;
771 }
Paul Zimmerman7359d482013-03-11 17:47:59 -0700772 }
773
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300774 haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700775 haintmsk &= ~(1 << chan->hc_num);
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300776 dwc2_writel(haintmsk, hsotg->regs + HAINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700777
778 /* Try to queue more transfers now that there's a free channel */
779 tr_type = dwc2_hcd_select_transactions(hsotg);
780 if (tr_type != DWC2_TRANSACTION_NONE)
781 dwc2_hcd_queue_transactions(hsotg, tr_type);
782}
783
784/*
785 * Halts a host channel. If the channel cannot be halted immediately because
786 * the request queue is full, this function ensures that the FIFO empty
787 * interrupt for the appropriate queue is enabled so that the halt request can
788 * be queued when there is space in the request queue.
789 *
790 * This function may also be called in DMA mode. In that case, the channel is
791 * simply released since the core always halts the channel automatically in
792 * DMA mode.
793 */
794static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
795 struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
796 enum dwc2_halt_status halt_status)
797{
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200798 if (dbg_hc(chan))
799 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700800
801 if (hsotg->core_params->dma_enable > 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200802 if (dbg_hc(chan))
803 dev_vdbg(hsotg->dev, "DMA enabled\n");
Paul Zimmerman7359d482013-03-11 17:47:59 -0700804 dwc2_release_channel(hsotg, chan, qtd, halt_status);
805 return;
806 }
807
808 /* Slave mode processing */
809 dwc2_hc_halt(hsotg, chan, halt_status);
810
811 if (chan->halt_on_queue) {
812 u32 gintmsk;
813
814 dev_vdbg(hsotg->dev, "Halt on queue\n");
815 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
816 chan->ep_type == USB_ENDPOINT_XFER_BULK) {
817 dev_vdbg(hsotg->dev, "control/bulk\n");
818 /*
819 * Make sure the Non-periodic Tx FIFO empty interrupt
820 * is enabled so that the non-periodic schedule will
821 * be processed
822 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300823 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700824 gintmsk |= GINTSTS_NPTXFEMP;
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300825 dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700826 } else {
827 dev_vdbg(hsotg->dev, "isoc/intr\n");
828 /*
829 * Move the QH from the periodic queued schedule to
830 * the periodic assigned schedule. This allows the
831 * halt to be queued when the periodic schedule is
832 * processed.
833 */
834 list_move(&chan->qh->qh_list_entry,
835 &hsotg->periodic_sched_assigned);
836
837 /*
838 * Make sure the Periodic Tx FIFO Empty interrupt is
839 * enabled so that the periodic schedule will be
840 * processed
841 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300842 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700843 gintmsk |= GINTSTS_PTXFEMP;
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300844 dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700845 }
846 }
847}
848
849/*
850 * Performs common cleanup for non-periodic transfers after a Transfer
851 * Complete interrupt. This function should be called after any endpoint type
852 * specific handling is finished to release the host channel.
853 */
854static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
855 struct dwc2_host_chan *chan,
856 int chnum, struct dwc2_qtd *qtd,
857 enum dwc2_halt_status halt_status)
858{
859 dev_vdbg(hsotg->dev, "%s()\n", __func__);
860
861 qtd->error_count = 0;
862
863 if (chan->hcint & HCINTMSK_NYET) {
864 /*
865 * Got a NYET on the last transaction of the transfer. This
866 * means that the endpoint should be in the PING state at the
867 * beginning of the next transfer.
868 */
869 dev_vdbg(hsotg->dev, "got NYET\n");
870 chan->qh->ping_state = 1;
871 }
872
873 /*
874 * Always halt and release the host channel to make it available for
875 * more transfers. There may still be more phases for a control
876 * transfer or more data packets for a bulk transfer at this point,
877 * but the host channel is still halted. A channel will be reassigned
878 * to the transfer when the non-periodic schedule is processed after
879 * the channel is released. This allows transactions to be queued
880 * properly via dwc2_hcd_queue_transactions, which also enables the
881 * Tx FIFO Empty interrupt if necessary.
882 */
883 if (chan->ep_is_in) {
884 /*
885 * IN transfers in Slave mode require an explicit disable to
886 * halt the channel. (In DMA mode, this call simply releases
887 * the channel.)
888 */
889 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
890 } else {
891 /*
892 * The channel is automatically disabled by the core for OUT
893 * transfers in Slave mode
894 */
895 dwc2_release_channel(hsotg, chan, qtd, halt_status);
896 }
897}
898
899/*
900 * Performs common cleanup for periodic transfers after a Transfer Complete
901 * interrupt. This function should be called after any endpoint type specific
902 * handling is finished to release the host channel.
903 */
904static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
905 struct dwc2_host_chan *chan, int chnum,
906 struct dwc2_qtd *qtd,
907 enum dwc2_halt_status halt_status)
908{
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300909 u32 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
Paul Zimmerman7359d482013-03-11 17:47:59 -0700910
911 qtd->error_count = 0;
912
913 if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
914 /* Core halts channel in these cases */
915 dwc2_release_channel(hsotg, chan, qtd, halt_status);
916 else
917 /* Flush any outstanding requests from the Tx queue */
918 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
919}
920
921static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
922 struct dwc2_host_chan *chan, int chnum,
923 struct dwc2_qtd *qtd)
924{
925 struct dwc2_hcd_iso_packet_desc *frame_desc;
926 u32 len;
927
928 if (!qtd->urb)
929 return 0;
930
931 frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
932 len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
933 DWC2_HC_XFER_COMPLETE, NULL);
934 if (!len) {
935 qtd->complete_split = 0;
936 qtd->isoc_split_offset = 0;
937 return 0;
938 }
939
940 frame_desc->actual_length += len;
941
Paul Zimmermane7e59e12013-11-25 13:42:46 -0800942 if (chan->align_buf) {
Paul Zimmerman0cf31f82013-07-13 14:53:52 -0700943 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
Gregory Herrerodb62b9a2015-04-29 22:09:16 +0200944 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
945 chan->qh->dw_align_buf_size, DMA_FROM_DEVICE);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700946 memcpy(qtd->urb->buf + frame_desc->offset +
947 qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700948 }
949
950 qtd->isoc_split_offset += len;
951
952 if (frame_desc->actual_length >= frame_desc->length) {
953 frame_desc->status = 0;
954 qtd->isoc_frame_index++;
955 qtd->complete_split = 0;
956 qtd->isoc_split_offset = 0;
957 }
958
959 if (qtd->isoc_frame_index == qtd->urb->packet_count) {
Paul Zimmerman0d012b92013-07-13 14:53:48 -0700960 dwc2_host_complete(hsotg, qtd, 0);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700961 dwc2_release_channel(hsotg, chan, qtd,
962 DWC2_HC_XFER_URB_COMPLETE);
963 } else {
964 dwc2_release_channel(hsotg, chan, qtd,
965 DWC2_HC_XFER_NO_HALT_STATUS);
966 }
967
968 return 1; /* Indicates that channel released */
969}
970
971/*
972 * Handles a host channel Transfer Complete interrupt. This handler may be
973 * called in either DMA mode or Slave mode.
974 */
975static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
976 struct dwc2_host_chan *chan, int chnum,
977 struct dwc2_qtd *qtd)
978{
979 struct dwc2_hcd_urb *urb = qtd->urb;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700980 enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
Paul Zimmerman2b54fa62014-02-12 17:44:35 -0800981 int pipe_type;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700982 int urb_xfer_done;
983
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200984 if (dbg_hc(chan))
985 dev_vdbg(hsotg->dev,
986 "--Host Channel %d Interrupt: Transfer Complete--\n",
987 chnum);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700988
Paul Zimmerman2b54fa62014-02-12 17:44:35 -0800989 if (!urb)
990 goto handle_xfercomp_done;
991
992 pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
993
Paul Zimmerman7359d482013-03-11 17:47:59 -0700994 if (hsotg->core_params->dma_desc_enable > 0) {
995 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
996 if (pipe_type == USB_ENDPOINT_XFER_ISOC)
997 /* Do not disable the interrupt, just clear it */
998 return;
999 goto handle_xfercomp_done;
1000 }
1001
1002 /* Handle xfer complete on CSPLIT */
1003 if (chan->qh->do_split) {
1004 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
1005 hsotg->core_params->dma_enable > 0) {
1006 if (qtd->complete_split &&
1007 dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
1008 qtd))
1009 goto handle_xfercomp_done;
1010 } else {
1011 qtd->complete_split = 0;
1012 }
1013 }
1014
Paul Zimmerman7359d482013-03-11 17:47:59 -07001015 /* Update the QTD and URB states */
1016 switch (pipe_type) {
1017 case USB_ENDPOINT_XFER_CONTROL:
1018 switch (qtd->control_phase) {
1019 case DWC2_CONTROL_SETUP:
1020 if (urb->length > 0)
1021 qtd->control_phase = DWC2_CONTROL_DATA;
1022 else
1023 qtd->control_phase = DWC2_CONTROL_STATUS;
1024 dev_vdbg(hsotg->dev,
1025 " Control setup transaction done\n");
1026 halt_status = DWC2_HC_XFER_COMPLETE;
1027 break;
1028 case DWC2_CONTROL_DATA:
1029 urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
1030 chnum, urb, qtd);
1031 if (urb_xfer_done) {
1032 qtd->control_phase = DWC2_CONTROL_STATUS;
1033 dev_vdbg(hsotg->dev,
1034 " Control data transfer done\n");
1035 } else {
1036 dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1037 qtd);
1038 }
1039 halt_status = DWC2_HC_XFER_COMPLETE;
1040 break;
1041 case DWC2_CONTROL_STATUS:
1042 dev_vdbg(hsotg->dev, " Control transfer complete\n");
1043 if (urb->status == -EINPROGRESS)
1044 urb->status = 0;
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001045 dwc2_host_complete(hsotg, qtd, urb->status);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001046 halt_status = DWC2_HC_XFER_URB_COMPLETE;
1047 break;
1048 }
1049
1050 dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1051 halt_status);
1052 break;
1053 case USB_ENDPOINT_XFER_BULK:
1054 dev_vdbg(hsotg->dev, " Bulk transfer complete\n");
1055 urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1056 qtd);
1057 if (urb_xfer_done) {
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001058 dwc2_host_complete(hsotg, qtd, urb->status);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001059 halt_status = DWC2_HC_XFER_URB_COMPLETE;
1060 } else {
1061 halt_status = DWC2_HC_XFER_COMPLETE;
1062 }
1063
1064 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1065 dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1066 halt_status);
1067 break;
1068 case USB_ENDPOINT_XFER_INT:
1069 dev_vdbg(hsotg->dev, " Interrupt transfer complete\n");
1070 urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1071 qtd);
1072
1073 /*
1074 * Interrupt URB is done on the first transfer complete
1075 * interrupt
1076 */
1077 if (urb_xfer_done) {
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001078 dwc2_host_complete(hsotg, qtd, urb->status);
1079 halt_status = DWC2_HC_XFER_URB_COMPLETE;
Paul Zimmerman7359d482013-03-11 17:47:59 -07001080 } else {
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001081 halt_status = DWC2_HC_XFER_COMPLETE;
Paul Zimmerman7359d482013-03-11 17:47:59 -07001082 }
1083
1084 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1085 dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1086 halt_status);
1087 break;
1088 case USB_ENDPOINT_XFER_ISOC:
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001089 if (dbg_perio())
1090 dev_vdbg(hsotg->dev, " Isochronous transfer complete\n");
Paul Zimmerman7359d482013-03-11 17:47:59 -07001091 if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
1092 halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1093 chnum, qtd, DWC2_HC_XFER_COMPLETE);
1094 dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1095 halt_status);
1096 break;
1097 }
1098
1099handle_xfercomp_done:
1100 disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
1101}
1102
1103/*
1104 * Handles a host channel STALL interrupt. This handler may be called in
1105 * either DMA mode or Slave mode.
1106 */
1107static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1108 struct dwc2_host_chan *chan, int chnum,
1109 struct dwc2_qtd *qtd)
1110{
1111 struct dwc2_hcd_urb *urb = qtd->urb;
Paul Zimmerman2b54fa62014-02-12 17:44:35 -08001112 int pipe_type;
Paul Zimmerman7359d482013-03-11 17:47:59 -07001113
1114 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1115 chnum);
1116
1117 if (hsotg->core_params->dma_desc_enable > 0) {
1118 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1119 DWC2_HC_XFER_STALL);
1120 goto handle_stall_done;
1121 }
1122
1123 if (!urb)
1124 goto handle_stall_halt;
1125
Paul Zimmerman2b54fa62014-02-12 17:44:35 -08001126 pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
1127
Paul Zimmerman7359d482013-03-11 17:47:59 -07001128 if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001129 dwc2_host_complete(hsotg, qtd, -EPIPE);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001130
1131 if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1132 pipe_type == USB_ENDPOINT_XFER_INT) {
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001133 dwc2_host_complete(hsotg, qtd, -EPIPE);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001134 /*
1135 * USB protocol requires resetting the data toggle for bulk
1136 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1137 * setup command is issued to the endpoint. Anticipate the
1138 * CLEAR_FEATURE command since a STALL has occurred and reset
1139 * the data toggle now.
1140 */
1141 chan->qh->data_toggle = 0;
1142 }
1143
1144handle_stall_halt:
1145 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1146
1147handle_stall_done:
1148 disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1149}
1150
1151/*
1152 * Updates the state of the URB when a transfer has been stopped due to an
1153 * abnormal condition before the transfer completes. Modifies the
1154 * actual_length field of the URB to reflect the number of bytes that have
1155 * actually been transferred via the host channel.
1156 */
1157static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1158 struct dwc2_host_chan *chan, int chnum,
1159 struct dwc2_hcd_urb *urb,
1160 struct dwc2_qtd *qtd,
1161 enum dwc2_halt_status halt_status)
1162{
1163 u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
1164 qtd, halt_status, NULL);
1165 u32 hctsiz;
1166
1167 if (urb->actual_length + xfer_length > urb->length) {
1168 dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
1169 xfer_length = urb->length - urb->actual_length;
1170 }
1171
1172 /* Non DWORD-aligned buffer case handling */
1173 if (chan->align_buf && xfer_length && chan->ep_is_in) {
Paul Zimmerman0cf31f82013-07-13 14:53:52 -07001174 dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
Gregory Herrerodb62b9a2015-04-29 22:09:16 +02001175 dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
1176 chan->qh->dw_align_buf_size,
1177 chan->ep_is_in ?
1178 DMA_FROM_DEVICE : DMA_TO_DEVICE);
1179 if (chan->ep_is_in)
1180 memcpy(urb->buf + urb->actual_length,
1181 chan->qh->dw_align_buf,
1182 xfer_length);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001183 }
1184
1185 urb->actual_length += xfer_length;
1186
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001187 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
Paul Zimmerman7359d482013-03-11 17:47:59 -07001188 dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
1189 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
1190 dev_vdbg(hsotg->dev, " chan->start_pkt_count %d\n",
1191 chan->start_pkt_count);
1192 dev_vdbg(hsotg->dev, " hctsiz.pktcnt %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001193 (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001194 dev_vdbg(hsotg->dev, " chan->max_packet %d\n", chan->max_packet);
1195 dev_vdbg(hsotg->dev, " bytes_transferred %d\n",
1196 xfer_length);
1197 dev_vdbg(hsotg->dev, " urb->actual_length %d\n",
1198 urb->actual_length);
1199 dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n",
1200 urb->length);
1201}
1202
1203/*
1204 * Handles a host channel NAK interrupt. This handler may be called in either
1205 * DMA mode or Slave mode.
1206 */
1207static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1208 struct dwc2_host_chan *chan, int chnum,
1209 struct dwc2_qtd *qtd)
1210{
Gregory Herreroe4991232015-04-29 22:09:20 +02001211 if (!qtd) {
1212 dev_dbg(hsotg->dev, "%s: qtd is NULL\n", __func__);
1213 return;
1214 }
1215
1216 if (!qtd->urb) {
1217 dev_dbg(hsotg->dev, "%s: qtd->urb is NULL\n", __func__);
1218 return;
1219 }
1220
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001221 if (dbg_hc(chan))
1222 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
1223 chnum);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001224
1225 /*
1226 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1227 * interrupt. Re-start the SSPLIT transfer.
1228 */
1229 if (chan->do_split) {
1230 if (chan->complete_split)
1231 qtd->error_count = 0;
1232 qtd->complete_split = 0;
1233 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1234 goto handle_nak_done;
1235 }
1236
1237 switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1238 case USB_ENDPOINT_XFER_CONTROL:
1239 case USB_ENDPOINT_XFER_BULK:
1240 if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
1241 /*
1242 * NAK interrupts are enabled on bulk/control IN
1243 * transfers in DMA mode for the sole purpose of
1244 * resetting the error count after a transaction error
1245 * occurs. The core will continue transferring data.
1246 */
1247 qtd->error_count = 0;
1248 break;
1249 }
1250
1251 /*
1252 * NAK interrupts normally occur during OUT transfers in DMA
1253 * or Slave mode. For IN transfers, more requests will be
1254 * queued as request queue space is available.
1255 */
1256 qtd->error_count = 0;
1257
1258 if (!chan->qh->ping_state) {
1259 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1260 qtd, DWC2_HC_XFER_NAK);
1261 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1262
1263 if (chan->speed == USB_SPEED_HIGH)
1264 chan->qh->ping_state = 1;
1265 }
1266
1267 /*
1268 * Halt the channel so the transfer can be re-started from
1269 * the appropriate point or the PING protocol will
1270 * start/continue
1271 */
1272 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1273 break;
1274 case USB_ENDPOINT_XFER_INT:
1275 qtd->error_count = 0;
1276 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1277 break;
1278 case USB_ENDPOINT_XFER_ISOC:
1279 /* Should never get called for isochronous transfers */
1280 dev_err(hsotg->dev, "NACK interrupt for ISOC transfer\n");
1281 break;
1282 }
1283
1284handle_nak_done:
1285 disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
1286}
1287
1288/*
1289 * Handles a host channel ACK interrupt. This interrupt is enabled when
1290 * performing the PING protocol in Slave mode, when errors occur during
1291 * either Slave mode or DMA mode, and during Start Split transactions.
1292 */
1293static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
1294 struct dwc2_host_chan *chan, int chnum,
1295 struct dwc2_qtd *qtd)
1296{
1297 struct dwc2_hcd_iso_packet_desc *frame_desc;
1298
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001299 if (dbg_hc(chan))
1300 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
1301 chnum);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001302
1303 if (chan->do_split) {
1304 /* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
1305 if (!chan->ep_is_in &&
1306 chan->data_pid_start != DWC2_HC_PID_SETUP)
1307 qtd->ssplit_out_xfer_count = chan->xfer_len;
1308
1309 if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
1310 qtd->complete_split = 1;
1311 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1312 } else {
1313 /* ISOC OUT */
1314 switch (chan->xact_pos) {
1315 case DWC2_HCSPLT_XACTPOS_ALL:
1316 break;
1317 case DWC2_HCSPLT_XACTPOS_END:
1318 qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
1319 qtd->isoc_split_offset = 0;
1320 break;
1321 case DWC2_HCSPLT_XACTPOS_BEGIN:
1322 case DWC2_HCSPLT_XACTPOS_MID:
1323 /*
1324 * For BEGIN or MID, calculate the length for
1325 * the next microframe to determine the correct
1326 * SSPLIT token, either MID or END
1327 */
1328 frame_desc = &qtd->urb->iso_descs[
1329 qtd->isoc_frame_index];
1330 qtd->isoc_split_offset += 188;
1331
1332 if (frame_desc->length - qtd->isoc_split_offset
1333 <= 188)
1334 qtd->isoc_split_pos =
1335 DWC2_HCSPLT_XACTPOS_END;
1336 else
1337 qtd->isoc_split_pos =
1338 DWC2_HCSPLT_XACTPOS_MID;
1339 break;
1340 }
1341 }
1342 } else {
1343 qtd->error_count = 0;
1344
1345 if (chan->qh->ping_state) {
1346 chan->qh->ping_state = 0;
1347 /*
1348 * Halt the channel so the transfer can be re-started
1349 * from the appropriate point. This only happens in
1350 * Slave mode. In DMA mode, the ping_state is cleared
1351 * when the transfer is started because the core
1352 * automatically executes the PING, then the transfer.
1353 */
1354 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1355 }
1356 }
1357
1358 /*
1359 * If the ACK occurred when _not_ in the PING state, let the channel
1360 * continue transferring data after clearing the error count
1361 */
1362 disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
1363}
1364
1365/*
1366 * Handles a host channel NYET interrupt. This interrupt should only occur on
1367 * Bulk and Control OUT endpoints and for complete split transactions. If a
1368 * NYET occurs at the same time as a Transfer Complete interrupt, it is
1369 * handled in the xfercomp interrupt handler, not here. This handler may be
1370 * called in either DMA mode or Slave mode.
1371 */
1372static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
1373 struct dwc2_host_chan *chan, int chnum,
1374 struct dwc2_qtd *qtd)
1375{
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001376 if (dbg_hc(chan))
1377 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
1378 chnum);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001379
1380 /*
1381 * NYET on CSPLIT
1382 * re-do the CSPLIT immediately on non-periodic
1383 */
1384 if (chan->do_split && chan->complete_split) {
1385 if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
1386 hsotg->core_params->dma_enable > 0) {
1387 qtd->complete_split = 0;
1388 qtd->isoc_split_offset = 0;
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001389 qtd->isoc_frame_index++;
Paul Zimmerman7902c162013-04-22 14:00:18 -07001390 if (qtd->urb &&
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001391 qtd->isoc_frame_index == qtd->urb->packet_count) {
1392 dwc2_host_complete(hsotg, qtd, 0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001393 dwc2_release_channel(hsotg, chan, qtd,
Paul Zimmerman7902c162013-04-22 14:00:18 -07001394 DWC2_HC_XFER_URB_COMPLETE);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001395 } else {
1396 dwc2_release_channel(hsotg, chan, qtd,
1397 DWC2_HC_XFER_NO_HALT_STATUS);
1398 }
1399 goto handle_nyet_done;
1400 }
1401
1402 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1403 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1404 int frnum = dwc2_hcd_get_frame_number(hsotg);
1405
1406 if (dwc2_full_frame_num(frnum) !=
1407 dwc2_full_frame_num(chan->qh->sched_frame)) {
1408 /*
1409 * No longer in the same full speed frame.
1410 * Treat this as a transaction error.
1411 */
1412#if 0
1413 /*
1414 * Todo: Fix system performance so this can
1415 * be treated as an error. Right now complete
1416 * splits cannot be scheduled precisely enough
1417 * due to other system activity, so this error
1418 * occurs regularly in Slave mode.
1419 */
1420 qtd->error_count++;
1421#endif
1422 qtd->complete_split = 0;
1423 dwc2_halt_channel(hsotg, chan, qtd,
1424 DWC2_HC_XFER_XACT_ERR);
1425 /* Todo: add support for isoc release */
1426 goto handle_nyet_done;
1427 }
1428 }
1429
1430 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1431 goto handle_nyet_done;
1432 }
1433
1434 chan->qh->ping_state = 1;
1435 qtd->error_count = 0;
1436
1437 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
1438 DWC2_HC_XFER_NYET);
1439 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1440
1441 /*
1442 * Halt the channel and re-start the transfer so the PING protocol
1443 * will start
1444 */
1445 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1446
1447handle_nyet_done:
1448 disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
1449}
1450
1451/*
1452 * Handles a host channel babble interrupt. This handler may be called in
1453 * either DMA mode or Slave mode.
1454 */
1455static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1456 struct dwc2_host_chan *chan, int chnum,
1457 struct dwc2_qtd *qtd)
1458{
1459 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1460 chnum);
1461
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001462 dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1463
Paul Zimmerman7359d482013-03-11 17:47:59 -07001464 if (hsotg->core_params->dma_desc_enable > 0) {
1465 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1466 DWC2_HC_XFER_BABBLE_ERR);
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001467 goto disable_int;
Paul Zimmerman7359d482013-03-11 17:47:59 -07001468 }
1469
1470 if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001471 dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001472 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1473 } else {
1474 enum dwc2_halt_status halt_status;
1475
1476 halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1477 qtd, DWC2_HC_XFER_BABBLE_ERR);
1478 dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1479 }
1480
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001481disable_int:
Paul Zimmerman7359d482013-03-11 17:47:59 -07001482 disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1483}

/*
 * Handles a host channel AHB error interrupt. This handler is only called in
 * DMA mode.
 */
static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	char *pipetype, *speed;
	u32 hcchar;
	u32 hcsplt;
	u32 hctsiz;
	u32 hc_dma;

	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
		chnum);

	if (!urb)
		goto handle_ahberr_halt;

	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);

	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chnum));
	hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chnum));
	hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
	hc_dma = dwc2_readl(hsotg->regs + HCDMA(chnum));

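	/*
	 * Dump the channel registers and the URB's pipe, buffer and
	 * scheduling details; an AHB error usually points at a bad DMA
	 * address or a bus problem, so this context helps debugging.
	 */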
	dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
	dev_err(hsotg->dev, "  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
	dev_err(hsotg->dev, "  hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
	dev_err(hsotg->dev, "  Device address: %d\n",
		dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_err(hsotg->dev, "  Endpoint: %d, %s\n",
		dwc2_hcd_get_ep_num(&urb->pipe_info),
		dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
		pipetype = "CONTROL";
		break;
	case USB_ENDPOINT_XFER_BULK:
		pipetype = "BULK";
		break;
	case USB_ENDPOINT_XFER_INT:
		pipetype = "INTERRUPT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		pipetype = "ISOCHRONOUS";
		break;
	default:
		pipetype = "UNKNOWN";
		break;
	}

	dev_err(hsotg->dev, "  Endpoint type: %s\n", pipetype);

	switch (chan->speed) {
	case USB_SPEED_HIGH:
		speed = "HIGH";
		break;
	case USB_SPEED_FULL:
		speed = "FULL";
		break;
	case USB_SPEED_LOW:
		speed = "LOW";
		break;
	default:
		speed = "UNKNOWN";
		break;
	}

	dev_err(hsotg->dev, "  Speed: %s\n", speed);

	dev_err(hsotg->dev, "  Max packet size: %d\n",
		dwc2_hcd_get_mps(&urb->pipe_info));
	dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
	dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
		urb->buf, (unsigned long)urb->dma);
	dev_err(hsotg->dev, "  Setup buffer: %p, Setup DMA: %08lx\n",
		urb->setup_packet, (unsigned long)urb->setup_dma);
	dev_err(hsotg->dev, "  Interval: %d\n", urb->interval);

	/* Core halts the channel for Descriptor DMA mode */
	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_AHB_ERR);
		goto handle_ahberr_done;
	}

	dwc2_host_complete(hsotg, qtd, -EIO);

handle_ahberr_halt:
	/*
	 * Force a channel halt. Don't call dwc2_halt_channel because that won't
	 * write to the HCCHARn register in DMA mode to force the halt.
	 */
	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);

handle_ahberr_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
}

/*
 * Handles a host channel transaction error interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 */
static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 struct dwc2_qtd *qtd)
{
	dev_dbg(hsotg->dev,
		"--Host Channel %d Interrupt: Transaction Error--\n", chnum);

	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);

	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
					    DWC2_HC_XFER_XACT_ERR);
		goto handle_xacterr_done;
	}

	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		qtd->error_count++;
		if (!chan->qh->ping_state) {
			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
						  qtd, DWC2_HC_XFER_XACT_ERR);
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
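			/*
			 * A high-speed OUT endpoint recovers using the PING
			 * protocol, so restart this QH in ping state.
			 */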
			if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
				chan->qh->ping_state = 1;
		}

		/*
		 * Halt the channel so the transfer can be re-started from
		 * the appropriate point or the PING protocol will start
		 */
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
		break;
	case USB_ENDPOINT_XFER_INT:
		qtd->error_count++;
		if (chan->do_split && chan->complete_split)
			qtd->complete_split = 0;
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		{
			enum dwc2_halt_status halt_status;

			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
					chnum, qtd, DWC2_HC_XFER_XACT_ERR);
			dwc2_halt_channel(hsotg, chan, qtd, halt_status);
		}
		break;
	}

handle_xacterr_done:
	disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
}

/*
 * Handles a host channel frame overrun interrupt. This handler may be called
 * in either DMA mode or Slave mode.
 */
static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
				  struct dwc2_host_chan *chan, int chnum,
				  struct dwc2_qtd *qtd)
{
	enum dwc2_halt_status halt_status;

	if (dbg_hc(chan))
		dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
			chnum);

	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);

	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
					qtd, DWC2_HC_XFER_FRAME_OVERRUN);
		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
		break;
	}

	disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
}

/*
 * Handles a host channel data toggle error interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 */
static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan, int chnum,
				    struct dwc2_qtd *qtd)
{
	dev_dbg(hsotg->dev,
		"--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);

	if (chan->ep_is_in)
		qtd->error_count = 0;
	else
		dev_err(hsotg->dev,
			"Data Toggle Error on OUT transfer, channel %d\n",
			chnum);

	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
	disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
}

/*
 * For debug only. It checks that a valid halt status is set and that
 * HCCHARn.chdis is clear. If there's a problem, corrective action is
 * taken and a warning is issued.
 *
 * Return: true if halt status is ok, false otherwise
 */
static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
#ifdef DEBUG
	u32 hcchar;
	u32 hctsiz;
	u32 hcintmsk;
	u32 hcsplt;

	if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
		/*
		 * This code is here only as a check. This condition should
		 * never happen. Ignore the halt if it does occur.
		 */
		hcchar = dwc2_readl(hsotg->regs + HCCHAR(chnum));
		hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
		hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(chnum));
		hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chnum));
		dev_dbg(hsotg->dev,
			"%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
			__func__);
		dev_dbg(hsotg->dev,
			"channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
			chnum, hcchar, hctsiz);
		dev_dbg(hsotg->dev,
			"hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
			chan->hcint, hcintmsk, hcsplt);
		if (qtd)
			dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
				qtd->complete_split);
		dev_warn(hsotg->dev,
			 "%s: no halt status, channel %d, ignoring interrupt\n",
			 __func__, chnum);
		return false;
	}

	/*
	 * This code is here only as a check. hcchar.chdis should never be set
	 * when the halt interrupt occurs. Halt the channel again if it does
	 * occur.
	 */
	hcchar = dwc2_readl(hsotg->regs + HCCHAR(chnum));
	if (hcchar & HCCHAR_CHDIS) {
		dev_warn(hsotg->dev,
			 "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
			 __func__, hcchar);
		chan->halt_pending = 0;
		dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
		return false;
	}
#endif

	return true;
}

/*
 * Handles a host Channel Halted interrupt in DMA mode. This handler
 * determines the reason the channel halted and proceeds accordingly.
 */
static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan, int chnum,
				    struct dwc2_qtd *qtd)
{
	u32 hcintmsk;
	int out_nak_enh = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev,
			 "--Host Channel %d Interrupt: DMA Channel Halted--\n",
			 chnum);

	/*
	 * For core with OUT NAK enhancement, the flow for high-speed
	 * CONTROL/BULK OUT is handled a little differently
	 */
	if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
		if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
		    (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		     chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
			out_nak_enh = 1;
		}
	}

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
	     hsotg->core_params->dma_desc_enable <= 0)) {
		if (hsotg->core_params->dma_desc_enable > 0)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			/*
			 * Just release the channel. A dequeue can happen on a
			 * transfer timeout. In the case of an AHB Error, the
			 * channel was forced to halt because there's no way to
			 * gracefully recover.
			 */
			dwc2_release_channel(hsotg, chan, qtd,
					     chan->halt_status);
		return;
	}

	hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(chnum));

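	/*
	 * chan->hcint holds the raw HCINT value latched in dwc2_hc_n_intr()
	 * before masking, so the checks below can see status bits even when
	 * their interrupts are masked; the hcintmsk value read above is used
	 * to tell the masked and unmasked NAK/ACK cases apart.
	 */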
	if (chan->hcint & HCINTMSK_XFERCOMPL) {
		/*
		 * Todo: This is here because of a possible hardware bug. Spec
		 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
		 * interrupt w/ACK bit set should occur, but I only see the
		 * XFERCOMP bit, even with it masked out. This is a workaround
		 * for that behavior. Should fix this when hardware is fixed.
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
	} else if (chan->hcint & HCINTMSK_STALL) {
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
	} else if ((chan->hcint & HCINTMSK_XACTERR) &&
		   hsotg->core_params->dma_desc_enable <= 0) {
		if (out_nak_enh) {
			if (chan->hcint &
			    (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
				dev_vdbg(hsotg->dev,
					 "XactErr with NYET/NAK/ACK\n");
				qtd->error_count = 0;
			} else {
				dev_vdbg(hsotg->dev,
					 "XactErr without NYET/NAK/ACK\n");
			}
		}

		/*
		 * Must handle xacterr before nak or ack. Could get a xacterr
		 * at the same time as either of these on a BULK/CONTROL OUT
		 * that started with a PING. The xacterr takes precedence.
		 */
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
	} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
		   hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
	} else if ((chan->hcint & HCINTMSK_AHBERR) &&
		   hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
	} else if (chan->hcint & HCINTMSK_BBLERR) {
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
	} else if (chan->hcint & HCINTMSK_FRMOVRUN) {
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
	} else if (!out_nak_enh) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Must handle nyet before nak or ack. Could get a nyet
			 * at the same time as either of those on a BULK/CONTROL
			 * OUT that started with a PING. The nyet takes
			 * precedence.
			 */
			dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
		} else if ((chan->hcint & HCINTMSK_NAK) &&
			   !(hcintmsk & HCINTMSK_NAK)) {
			/*
			 * If nak is not masked, it's because a non-split IN
			 * transfer is in an error state. In that case, the nak
			 * is handled by the nak interrupt handler, not here.
			 * Handle nak here for BULK/CONTROL OUT transfers, which
			 * halt on a NAK to allow rewinding the buffer pointer.
			 */
			dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
		} else if ((chan->hcint & HCINTMSK_ACK) &&
			   !(hcintmsk & HCINTMSK_ACK)) {
			/*
			 * If ack is not masked, it's because a non-split IN
			 * transfer is in an error state. In that case, the ack
			 * is handled by the ack interrupt handler, not here.
			 * Handle ack here for split transfers. Start splits
			 * halt on ACK.
			 */
			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		} else {
			if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
				/*
				 * A periodic transfer halted with no other
				 * channel interrupts set. Assume it was halted
				 * by the core because it could not be completed
				 * in its scheduled (micro)frame.
				 */
				dev_dbg(hsotg->dev,
					"%s: Halt channel %d (assume incomplete periodic transfer)\n",
					__func__, chnum);
				dwc2_halt_channel(hsotg, chan, qtd,
					DWC2_HC_XFER_PERIODIC_INCOMPLETE);
			} else {
				dev_err(hsotg->dev,
					"%s: Channel %d - ChHltd set, but reason is unknown\n",
					__func__, chnum);
				dev_err(hsotg->dev,
					"hcint 0x%08x, intsts 0x%08x\n",
					chan->hcint,
					dwc2_readl(hsotg->regs + GINTSTS));
				goto error;
			}
		}
	} else {
		dev_info(hsotg->dev,
			 "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
			 chan->hcint);
error:
		/* Fall through: use the 3-strikes rule */
		qtd->error_count++;
		dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
					  qtd, DWC2_HC_XFER_XACT_ERR);
		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
	}
}

/*
 * Handles a host channel Channel Halted interrupt
 *
 * In slave mode, this handler is called only when the driver specifically
 * requests a halt. This occurs during handling other host channel interrupts
 * (e.g. nak, xacterr, stall, nyet, etc.).
 *
 * In DMA mode, this is the interrupt that occurs when the core has finished
 * processing a transfer on a channel. Other host channel interrupts (except
 * ahberr) are disabled in DMA mode.
 */
static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan, int chnum,
				struct dwc2_qtd *qtd)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
			 chnum);

	if (hsotg->core_params->dma_enable > 0) {
		dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
	} else {
		if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
			return;
		dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
	}
}

/*
 * Check if the given qtd is still the top of the list (and thus valid).
 *
 * If dwc2_hcd_qtd_unlink_and_free() has been called since we grabbed
 * the qtd from the top of the list, this will return false (otherwise true).
 */
static bool dwc2_check_qtd_still_ok(struct dwc2_qtd *qtd, struct dwc2_qh *qh)
{
	struct dwc2_qtd *cur_head;

	if (qh == NULL)
		return false;

	cur_head = list_first_entry(&qh->qtd_list, struct dwc2_qtd,
				    qtd_list_entry);
	return (cur_head == qtd);
}

/* Handles interrupt for a specific Host Channel */
static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan;
	u32 hcint, hcintmsk;

	chan = hsotg->hc_ptr_array[chnum];

	hcint = dwc2_readl(hsotg->regs + HCINT(chnum));
	hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(chnum));
	if (!chan) {
		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
		dwc2_writel(hcint, hsotg->regs + HCINT(chnum));
		return;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
			 chnum);
		dev_vdbg(hsotg->dev,
			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			 hcint, hcintmsk, hcint & hcintmsk);
	}

	dwc2_writel(hcint, hsotg->regs + HCINT(chnum));
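	/*
	 * Save the raw interrupt status for the Channel Halted handler,
	 * then mask it so that only enabled interrupt sources are
	 * dispatched below.
	 */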
	chan->hcint = hcint;
	hcint &= hcintmsk;

	/*
	 * If the channel was halted due to a dequeue, the qtd list might
	 * be empty or at least the first entry will not be the active qtd.
	 * In this case, take a shortcut and just release the channel.
	 */
	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		/*
		 * If the channel was halted, this should be the only
		 * interrupt unmasked
		 */
		WARN_ON(hcint != HCINTMSK_CHHLTD);
		if (hsotg->core_params->dma_desc_enable > 0)
			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
						    chan->halt_status);
		else
			dwc2_release_channel(hsotg, chan, NULL,
					     chan->halt_status);
		return;
	}

	if (list_empty(&chan->qh->qtd_list)) {
		/*
		 * TODO: Will this ever happen with the
		 * DWC2_HC_XFER_URB_DEQUEUE handling above?
		 */
		dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
			chnum);
		dev_dbg(hsotg->dev,
			"  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
			chan->hcint, hcintmsk, hcint);
		chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
		disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
		chan->hcint = 0;
		return;
	}

	qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
			       qtd_list_entry);

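	/*
	 * In Slave mode, only handle a Channel Halted status on its own: if
	 * any other channel interrupt is pending as well, drop CHHLTD here
	 * so the dispatch below services the underlying condition first
	 * (see dwc2_hc_chhltd_intr()).
	 */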
	if (hsotg->core_params->dma_enable <= 0) {
		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
			hcint &= ~HCINTMSK_CHHLTD;
	}

	if (hcint & HCINTMSK_XFERCOMPL) {
		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
		/*
		 * If NYET occurred at same time as Xfer Complete, the NYET is
		 * handled by the Xfer Complete interrupt handler. Don't want
		 * to call the NYET interrupt handler in this case.
		 */
		hcint &= ~HCINTMSK_NYET;
	}

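	/*
	 * Dispatch the remaining channel interrupts one at a time. A handler
	 * may complete and free the qtd, so check that it is still at the
	 * head of the QH's qtd list before handling the next condition.
	 */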
	if (hcint & HCINTMSK_CHHLTD) {
		dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_AHBERR) {
		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_STALL) {
		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_NAK) {
		dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_ACK) {
		dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_NYET) {
		dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_XACTERR) {
		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_BBLERR) {
		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_FRMOVRUN) {
		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}
	if (hcint & HCINTMSK_DATATGLERR) {
		dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
			goto exit;
	}

exit:
	chan->hcint = 0;
}

/*
 * This interrupt indicates that one or more host channels have a pending
 * interrupt. There are multiple conditions that can cause each host channel
 * interrupt. This function determines which conditions have occurred for each
 * host channel interrupt and handles them appropriately.
 */
static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
{
	u32 haint;
	int i;

	haint = dwc2_readl(hsotg->regs + HAINT);
	if (dbg_perio()) {
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

		dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
	}

	for (i = 0; i < hsotg->core_params->host_channels; i++) {
		if (haint & (1 << i))
			dwc2_hc_n_intr(hsotg, i);
	}
}

/* This function handles interrupts for the HCD */
irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
{
	u32 gintsts, dbg_gintsts;
	irqreturn_t retval = IRQ_NONE;

	if (!dwc2_is_controller_alive(hsotg)) {
		dev_warn(hsotg->dev, "Controller is dead\n");
		return retval;
	}

	spin_lock(&hsotg->lock);

	/* Check if HOST Mode */
	if (dwc2_is_host_mode(hsotg)) {
		gintsts = dwc2_read_core_intr(hsotg);
		if (!gintsts) {
			spin_unlock(&hsotg->lock);
			return retval;
		}

		retval = IRQ_HANDLED;

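		/*
		 * dbg_gintsts is only used to decide whether to emit the
		 * verbose messages below; unless the corresponding debug
		 * options are enabled, the very frequent sources (SOF, host
		 * channel, RX FIFO level, periodic TX FIFO empty) are
		 * excluded from that decision.
		 */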
		dbg_gintsts = gintsts;
#ifndef DEBUG_SOF
		dbg_gintsts &= ~GINTSTS_SOF;
#endif
		if (!dbg_perio())
			dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
					 GINTSTS_PTXFEMP);

		/* Only print if there are any non-suppressed interrupts left */
		if (dbg_gintsts)
			dev_vdbg(hsotg->dev,
				 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
				 gintsts);

		if (gintsts & GINTSTS_SOF)
			dwc2_sof_intr(hsotg);
		if (gintsts & GINTSTS_RXFLVL)
			dwc2_rx_fifo_level_intr(hsotg);
		if (gintsts & GINTSTS_NPTXFEMP)
			dwc2_np_tx_fifo_empty_intr(hsotg);
		if (gintsts & GINTSTS_PRTINT)
			dwc2_port_intr(hsotg);
		if (gintsts & GINTSTS_HCHINT)
			dwc2_hc_intr(hsotg);
		if (gintsts & GINTSTS_PTXFEMP)
			dwc2_perio_tx_fifo_empty_intr(hsotg);

		if (dbg_gintsts) {
			dev_vdbg(hsotg->dev,
				 "DWC OTG HCD Finished Servicing Interrupts\n");
			dev_vdbg(hsotg->dev,
				 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
				 dwc2_readl(hsotg->regs + GINTSTS),
				 dwc2_readl(hsotg->regs + GINTMSK));
		}
	}

	spin_unlock(&hsotg->lock);

	return retval;
}