Greg Kroah-Hartman5fd54ac2017-11-03 11:28:30 +01001// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
Paul Zimmerman7359d482013-03-11 17:47:59 -07002/*
3 * hcd.c - DesignWare HS OTG Controller host-mode routines
4 *
5 * Copyright (C) 2004-2013 Synopsys, Inc.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The names of the above-listed copyright holders may not be used
17 * to endorse or promote products derived from this software without
18 * specific prior written permission.
19 *
20 * ALTERNATIVELY, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") as published by the Free Software
22 * Foundation; either version 2 of the License, or (at your option) any
23 * later version.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38/*
39 * This file contains the core HCD code, and implements the Linux hc_driver
40 * API
41 */
42#include <linux/kernel.h>
43#include <linux/module.h>
44#include <linux/spinlock.h>
45#include <linux/interrupt.h>
Heiner Kallweit348becd2017-01-25 23:10:51 +010046#include <linux/platform_device.h>
Paul Zimmerman7359d482013-03-11 17:47:59 -070047#include <linux/dma-mapping.h>
48#include <linux/delay.h>
49#include <linux/io.h>
50#include <linux/slab.h>
51#include <linux/usb.h>
52
53#include <linux/usb/hcd.h>
54#include <linux/usb/ch11.h>
55
56#include "core.h"
57#include "hcd.h"
58
Chen Yu9156a7e2017-01-23 14:59:57 -080059static void dwc2_port_resume(struct dwc2_hsotg *hsotg);
60
John Younb02038fa2016-02-23 19:55:00 -080061/*
62 * =========================================================================
63 * Host Core Layer Functions
64 * =========================================================================
65 */
66
67/**
 68 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
69 * used in both device and host modes
70 *
71 * @hsotg: Programming view of the DWC_otg controller
72 */
73static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
74{
75 u32 intmsk;
76
77 /* Clear any pending OTG Interrupts */
78 dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
79
80 /* Clear any pending interrupts */
81 dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
82
83 /* Enable the interrupts in the GINTMSK */
84 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
85
John Youn95832c02017-01-23 14:57:26 -080086 if (!hsotg->params.host_dma)
John Younb02038fa2016-02-23 19:55:00 -080087 intmsk |= GINTSTS_RXFLVL;
John Youn95832c02017-01-23 14:57:26 -080088 if (!hsotg->params.external_id_pin_ctl)
John Younb02038fa2016-02-23 19:55:00 -080089 intmsk |= GINTSTS_CONIDSTSCHNG;
90
91 intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
92 GINTSTS_SESSREQINT;
93
94 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
95}
96
97/*
98 * Initializes the FSLSPClkSel field of the HCFG register depending on the
99 * PHY type
100 */
101static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
102{
103 u32 hcfg, val;
104
105 if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
106 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
John Youn95832c02017-01-23 14:57:26 -0800107 hsotg->params.ulpi_fs_ls) ||
John Younbea8e862016-11-03 17:55:53 -0700108 hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
John Younb02038fa2016-02-23 19:55:00 -0800109 /* Full speed PHY */
110 val = HCFG_FSLSPCLKSEL_48_MHZ;
111 } else {
112 /* High speed PHY running at full speed or high speed */
113 val = HCFG_FSLSPCLKSEL_30_60_MHZ;
114 }
115
116 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
117 hcfg = dwc2_readl(hsotg->regs + HCFG);
118 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
119 hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
120 dwc2_writel(hcfg, hsotg->regs + HCFG);
121}
122
123static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
124{
Bruno Herrerae35b1352017-01-31 23:25:43 -0200125 u32 usbcfg, ggpio, i2cctl;
John Younb02038fa2016-02-23 19:55:00 -0800126 int retval = 0;
127
128 /*
129 * core_init() is now called on every switch so only call the
130 * following for the first time through
131 */
132 if (select_phy) {
133 dev_dbg(hsotg->dev, "FS PHY selected\n");
134
135 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
136 if (!(usbcfg & GUSBCFG_PHYSEL)) {
137 usbcfg |= GUSBCFG_PHYSEL;
138 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
139
140 /* Reset after a PHY select */
141 retval = dwc2_core_reset_and_force_dr_mode(hsotg);
142
143 if (retval) {
144 dev_err(hsotg->dev,
145 "%s: Reset failed, aborting", __func__);
146 return retval;
147 }
148 }
Bruno Herrerae35b1352017-01-31 23:25:43 -0200149
150 if (hsotg->params.activate_stm_fs_transceiver) {
151 ggpio = dwc2_readl(hsotg->regs + GGPIO);
152 if (!(ggpio & GGPIO_STM32_OTG_GCCFG_PWRDWN)) {
153 dev_dbg(hsotg->dev, "Activating transceiver\n");
154 /*
155 * STM32F4x9 uses the GGPIO register as general
156 * core configuration register.
157 */
158 ggpio |= GGPIO_STM32_OTG_GCCFG_PWRDWN;
159 dwc2_writel(ggpio, hsotg->regs + GGPIO);
160 }
161 }
John Younb02038fa2016-02-23 19:55:00 -0800162 }
163
164 /*
 165 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
166 * do this on HNP Dev/Host mode switches (done in dev_init and
167 * host_init).
168 */
169 if (dwc2_is_host_mode(hsotg))
170 dwc2_init_fs_ls_pclk_sel(hsotg);
171
John Youn95832c02017-01-23 14:57:26 -0800172 if (hsotg->params.i2c_enable) {
John Younb02038fa2016-02-23 19:55:00 -0800173 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
174
175 /* Program GUSBCFG.OtgUtmiFsSel to I2C */
176 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
177 usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
178 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
179
180 /* Program GI2CCTL.I2CEn */
181 i2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
182 i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
183 i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
184 i2cctl &= ~GI2CCTL_I2CEN;
185 dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
186 i2cctl |= GI2CCTL_I2CEN;
187 dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
188 }
189
190 return retval;
191}
192
193static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
194{
195 u32 usbcfg, usbcfg_old;
196 int retval = 0;
197
198 if (!select_phy)
199 return 0;
200
201 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
202 usbcfg_old = usbcfg;
203
204 /*
205 * HS PHY parameters. These parameters are preserved during soft reset
206 * so only program the first time. Do a soft reset immediately after
207 * setting phyif.
208 */
John Younbea8e862016-11-03 17:55:53 -0700209 switch (hsotg->params.phy_type) {
John Younb02038fa2016-02-23 19:55:00 -0800210 case DWC2_PHY_TYPE_PARAM_ULPI:
211 /* ULPI interface */
212 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
213 usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
214 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
John Youn95832c02017-01-23 14:57:26 -0800215 if (hsotg->params.phy_ulpi_ddr)
John Younb02038fa2016-02-23 19:55:00 -0800216 usbcfg |= GUSBCFG_DDRSEL;
Dinh Nguyenb11633c2017-10-16 08:57:18 -0500217
218 /* Set external VBUS indicator as needed. */
219 if (hsotg->params.oc_disable)
220 usbcfg |= (GUSBCFG_ULPI_INT_VBUS_IND |
221 GUSBCFG_INDICATORPASSTHROUGH);
John Younb02038fa2016-02-23 19:55:00 -0800222 break;
223 case DWC2_PHY_TYPE_PARAM_UTMI:
224 /* UTMI+ interface */
225 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
226 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
John Younbea8e862016-11-03 17:55:53 -0700227 if (hsotg->params.phy_utmi_width == 16)
John Younb02038fa2016-02-23 19:55:00 -0800228 usbcfg |= GUSBCFG_PHYIF16;
229 break;
230 default:
231 dev_err(hsotg->dev, "FS PHY selected at HS!\n");
232 break;
233 }
234
235 if (usbcfg != usbcfg_old) {
236 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
237
238 /* Reset after setting the PHY parameters */
239 retval = dwc2_core_reset_and_force_dr_mode(hsotg);
240 if (retval) {
241 dev_err(hsotg->dev,
242 "%s: Reset failed, aborting", __func__);
243 return retval;
244 }
245 }
246
247 return retval;
248}
249
250static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
251{
252 u32 usbcfg;
253 int retval = 0;
254
Vardan Mikayelyan38e90022016-11-14 19:17:03 -0800255 if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
256 hsotg->params.speed == DWC2_SPEED_PARAM_LOW) &&
John Younbea8e862016-11-03 17:55:53 -0700257 hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
Vardan Mikayelyan38e90022016-11-14 19:17:03 -0800258 /* If FS/LS mode with FS/LS PHY */
John Younb02038fa2016-02-23 19:55:00 -0800259 retval = dwc2_fs_phy_init(hsotg, select_phy);
260 if (retval)
261 return retval;
262 } else {
263 /* High speed PHY */
264 retval = dwc2_hs_phy_init(hsotg, select_phy);
265 if (retval)
266 return retval;
267 }
268
269 if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
270 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
John Youn95832c02017-01-23 14:57:26 -0800271 hsotg->params.ulpi_fs_ls) {
John Younb02038fa2016-02-23 19:55:00 -0800272 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
273 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
274 usbcfg |= GUSBCFG_ULPI_FS_LS;
275 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
276 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
277 } else {
278 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
279 usbcfg &= ~GUSBCFG_ULPI_FS_LS;
280 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
281 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
282 }
283
284 return retval;
285}
286
287static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
288{
289 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
290
291 switch (hsotg->hw_params.arch) {
292 case GHWCFG2_EXT_DMA_ARCH:
293 dev_err(hsotg->dev, "External DMA Mode not supported\n");
294 return -EINVAL;
295
296 case GHWCFG2_INT_DMA_ARCH:
297 dev_dbg(hsotg->dev, "Internal DMA Mode\n");
John Younbea8e862016-11-03 17:55:53 -0700298 if (hsotg->params.ahbcfg != -1) {
John Younb02038fa2016-02-23 19:55:00 -0800299 ahbcfg &= GAHBCFG_CTRL_MASK;
John Younbea8e862016-11-03 17:55:53 -0700300 ahbcfg |= hsotg->params.ahbcfg &
John Younb02038fa2016-02-23 19:55:00 -0800301 ~GAHBCFG_CTRL_MASK;
302 }
303 break;
304
305 case GHWCFG2_SLAVE_ONLY_ARCH:
306 default:
307 dev_dbg(hsotg->dev, "Slave Only Mode\n");
308 break;
309 }
310
John Youne7839f92016-11-03 17:56:07 -0700311 dev_dbg(hsotg->dev, "host_dma:%d dma_desc_enable:%d\n",
312 hsotg->params.host_dma,
John Younbea8e862016-11-03 17:55:53 -0700313 hsotg->params.dma_desc_enable);
John Younb02038fa2016-02-23 19:55:00 -0800314
John Youn95832c02017-01-23 14:57:26 -0800315 if (hsotg->params.host_dma) {
316 if (hsotg->params.dma_desc_enable)
John Younb02038fa2016-02-23 19:55:00 -0800317 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
318 else
319 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
320 } else {
321 dev_dbg(hsotg->dev, "Using Slave mode\n");
John Youn95832c02017-01-23 14:57:26 -0800322 hsotg->params.dma_desc_enable = false;
John Younb02038fa2016-02-23 19:55:00 -0800323 }
324
John Youn95832c02017-01-23 14:57:26 -0800325 if (hsotg->params.host_dma)
John Younb02038fa2016-02-23 19:55:00 -0800326 ahbcfg |= GAHBCFG_DMA_EN;
327
328 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
329
330 return 0;
331}
332
333static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
334{
335 u32 usbcfg;
336
337 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
338 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
339
340 switch (hsotg->hw_params.op_mode) {
341 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
John Younbea8e862016-11-03 17:55:53 -0700342 if (hsotg->params.otg_cap ==
John Younb02038fa2016-02-23 19:55:00 -0800343 DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
344 usbcfg |= GUSBCFG_HNPCAP;
John Younbea8e862016-11-03 17:55:53 -0700345 if (hsotg->params.otg_cap !=
John Younb02038fa2016-02-23 19:55:00 -0800346 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
347 usbcfg |= GUSBCFG_SRPCAP;
348 break;
349
350 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
351 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
352 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
John Younbea8e862016-11-03 17:55:53 -0700353 if (hsotg->params.otg_cap !=
John Younb02038fa2016-02-23 19:55:00 -0800354 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
355 usbcfg |= GUSBCFG_SRPCAP;
356 break;
357
358 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
359 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
360 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
361 default:
362 break;
363 }
364
365 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
366}
367
368/**
369 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
370 *
371 * @hsotg: Programming view of DWC_otg controller
372 */
373static void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
374{
375 u32 intmsk;
376
377 dev_dbg(hsotg->dev, "%s()\n", __func__);
378
379 /* Disable all interrupts */
380 dwc2_writel(0, hsotg->regs + GINTMSK);
381 dwc2_writel(0, hsotg->regs + HAINTMSK);
382
383 /* Enable the common interrupts */
384 dwc2_enable_common_interrupts(hsotg);
385
386 /* Enable host mode interrupts without disturbing common interrupts */
387 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
388 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
389 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
390}
391
392/**
393 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
394 *
395 * @hsotg: Programming view of DWC_otg controller
396 */
397static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
398{
399 u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
400
401 /* Disable host mode interrupts without disturbing common interrupts */
402 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
403 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
404 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
405}
406
407/*
 408 * dwc2_calculate_dynamic_fifo() - Calculates the default FIFO sizes
 409 * for systems that have a total FIFO depth smaller than the default
 410 * RX + TX FIFO size.
411 *
412 * @hsotg: Programming view of DWC_otg controller
413 */
414static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
415{
John Younbea8e862016-11-03 17:55:53 -0700416 struct dwc2_core_params *params = &hsotg->params;
John Younb02038fa2016-02-23 19:55:00 -0800417 struct dwc2_hw_params *hw = &hsotg->hw_params;
418 u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
419
420 total_fifo_size = hw->total_fifo_size;
421 rxfsiz = params->host_rx_fifo_size;
422 nptxfsiz = params->host_nperio_tx_fifo_size;
423 ptxfsiz = params->host_perio_tx_fifo_size;
424
425 /*
426 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
427 * allocation with support for high bandwidth endpoints. Synopsys
 428 * defines MPS (Max Packet Size) as 1024 for a periodic EP and as
 429 * 512 for a non-periodic EP.
430 */
431 if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
432 /*
433 * For Buffer DMA mode/Scatter Gather DMA mode
434 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
 435 * with n = number of host channels.
436 * 2 * ((1024/4) + 2) = 516
437 */
438 rxfsiz = 516 + hw->host_channels;
439
440 /*
441 * min non-periodic tx fifo depth
442 * 2 * (largest non-periodic USB packet used / 4)
443 * 2 * (512/4) = 256
444 */
445 nptxfsiz = 256;
446
447 /*
448 * min periodic tx fifo depth
449 * (largest packet size*MC)/4
450 * (1024 * 3)/4 = 768
451 */
452 ptxfsiz = 768;
453
454 params->host_rx_fifo_size = rxfsiz;
455 params->host_nperio_tx_fifo_size = nptxfsiz;
456 params->host_perio_tx_fifo_size = ptxfsiz;
457 }
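	/*
	 * Illustrative sizing (hypothetical values, not from the databook):
	 * with 16 host channels the fallback above gives
	 * rxfsiz = 516 + 16 = 532, nptxfsiz = 256 and ptxfsiz = 768,
	 * i.e. 1556 FIFO words in total, which must still fit in
	 * hw->total_fifo_size (checked below).
	 */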
458
459 /*
460 * If the summation of RX, NPTX and PTX fifo sizes is still
461 * bigger than the total_fifo_size, then we have a problem.
462 *
463 * We won't be able to allocate as many endpoints. Right now,
464 * we're just printing an error message, but ideally this FIFO
465 * allocation algorithm would be improved in the future.
466 *
467 * FIXME improve this FIFO allocation algorithm.
468 */
469 if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
470 dev_err(hsotg->dev, "invalid fifo sizes\n");
471}
472
473static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
474{
John Younbea8e862016-11-03 17:55:53 -0700475 struct dwc2_core_params *params = &hsotg->params;
John Younb02038fa2016-02-23 19:55:00 -0800476 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
477
478 if (!params->enable_dynamic_fifo)
479 return;
480
481 dwc2_calculate_dynamic_fifo(hsotg);
482
483 /* Rx FIFO */
484 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
485 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
486 grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
487 grxfsiz |= params->host_rx_fifo_size <<
488 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
489 dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ);
490 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
491 dwc2_readl(hsotg->regs + GRXFSIZ));
492
493 /* Non-periodic Tx FIFO */
494 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
495 dwc2_readl(hsotg->regs + GNPTXFSIZ));
496 nptxfsiz = params->host_nperio_tx_fifo_size <<
497 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
498 nptxfsiz |= params->host_rx_fifo_size <<
499 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
500 dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
501 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
502 dwc2_readl(hsotg->regs + GNPTXFSIZ));
503
504 /* Periodic Tx FIFO */
505 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
506 dwc2_readl(hsotg->regs + HPTXFSIZ));
507 hptxfsiz = params->host_perio_tx_fifo_size <<
508 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
509 hptxfsiz |= (params->host_rx_fifo_size +
510 params->host_nperio_tx_fifo_size) <<
511 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
512 dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
513 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
514 dwc2_readl(hsotg->regs + HPTXFSIZ));
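	/*
	 * Note: the three FIFOs are laid out back to back in the shared
	 * FIFO RAM: the Rx FIFO starts at offset 0, the non-periodic Tx
	 * FIFO directly after it, and the periodic Tx FIFO after both.
	 * That is why each start address programmed above is the sum of
	 * the preceding FIFO depths.
	 */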
515
John Youn95832c02017-01-23 14:57:26 -0800516 if (hsotg->params.en_multiple_tx_fifo &&
Sevak Arakelyane1f411d2017-01-23 15:01:01 -0800517 hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_91a) {
John Younb02038fa2016-02-23 19:55:00 -0800518 /*
Sevak Arakelyane1f411d2017-01-23 15:01:01 -0800519 * This feature was implemented in 2.91a version
John Younb02038fa2016-02-23 19:55:00 -0800520 * Global DFIFOCFG calculation for Host mode -
521 * include RxFIFO, NPTXFIFO and HPTXFIFO
522 */
523 dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
524 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
525 dfifocfg |= (params->host_rx_fifo_size +
526 params->host_nperio_tx_fifo_size +
527 params->host_perio_tx_fifo_size) <<
528 GDFIFOCFG_EPINFOBASE_SHIFT &
529 GDFIFOCFG_EPINFOBASE_MASK;
530 dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG);
531 }
532}
533
534/**
535 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
536 * the HFIR register according to PHY type and speed
537 *
538 * @hsotg: Programming view of DWC_otg controller
539 *
540 * NOTE: The caller can modify the value of the HFIR register only after the
541 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
542 * has been set
543 */
544u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
545{
546 u32 usbcfg;
547 u32 hprt0;
548 int clock = 60; /* default value */
549
550 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
551 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
552
553 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
554 !(usbcfg & GUSBCFG_PHYIF16))
555 clock = 60;
556 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
557 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
558 clock = 48;
559 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
560 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
561 clock = 30;
562 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
563 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
564 clock = 60;
565 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
566 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
567 clock = 48;
568 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
569 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
570 clock = 48;
571 if ((usbcfg & GUSBCFG_PHYSEL) &&
572 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
573 clock = 48;
574
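	/*
	 * Worked example (illustrative only): with a 60 MHz PHY clock the
	 * high-speed case below programs 125 * 60 - 1 = 7499 PHY clocks
	 * per 125 us microframe; the FS/LS case with a 48 MHz clock
	 * programs 1000 * 48 - 1 = 47999 clocks per 1 ms frame.
	 */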
575 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
576 /* High speed case */
577 return 125 * clock - 1;
578
579 /* FS/LS case */
580 return 1000 * clock - 1;
581}
582
583/**
584 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
585 * buffer
586 *
 587 * @hsotg: Programming view of DWC_otg controller
588 * @dest: Destination buffer for the packet
589 * @bytes: Number of bytes to copy to the destination
590 */
591void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
592{
593 u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
594 u32 *data_buf = (u32 *)dest;
595 int word_count = (bytes + 3) / 4;
596 int i;
597
598 /*
599 * Todo: Account for the case where dest is not dword aligned. This
600 * requires reading data from the FIFO into a u32 temp buffer, then
601 * moving it into the data buffer.
602 */
603
604 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
605
606 for (i = 0; i < word_count; i++, data_buf++)
607 *data_buf = dwc2_readl(fifo);
608}
609
Paul Zimmerman7359d482013-03-11 17:47:59 -0700610/**
611 * dwc2_dump_channel_info() - Prints the state of a host channel
612 *
613 * @hsotg: Programming view of DWC_otg controller
614 * @chan: Pointer to the channel to dump
615 *
616 * Must be called with interrupt disabled and spinlock held
617 *
618 * NOTE: This function will be removed once the peripheral controller code
619 * is integrated and the driver is stable
620 */
621static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
622 struct dwc2_host_chan *chan)
623{
624#ifdef VERBOSE_DEBUG
John Younbea8e862016-11-03 17:55:53 -0700625 int num_channels = hsotg->params.host_channels;
Paul Zimmerman7359d482013-03-11 17:47:59 -0700626 struct dwc2_qh *qh;
627 u32 hcchar;
628 u32 hcsplt;
629 u32 hctsiz;
630 u32 hc_dma;
631 int i;
632
John Younb02038fa2016-02-23 19:55:00 -0800633 if (!chan)
Paul Zimmerman7359d482013-03-11 17:47:59 -0700634 return;
635
Antti Seppälä95c8bc32015-08-20 21:41:07 +0300636 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
637 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));
638 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chan->hc_num));
639 hc_dma = dwc2_readl(hsotg->regs + HCDMA(chan->hc_num));
Paul Zimmerman7359d482013-03-11 17:47:59 -0700640
641 dev_dbg(hsotg->dev, " Assigned to channel %p:\n", chan);
642 dev_dbg(hsotg->dev, " hcchar 0x%08x, hcsplt 0x%08x\n",
643 hcchar, hcsplt);
644 dev_dbg(hsotg->dev, " hctsiz 0x%08x, hc_dma 0x%08x\n",
645 hctsiz, hc_dma);
646 dev_dbg(hsotg->dev, " dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
647 chan->dev_addr, chan->ep_num, chan->ep_is_in);
648 dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
649 dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
650 dev_dbg(hsotg->dev, " data_pid_start: %d\n", chan->data_pid_start);
651 dev_dbg(hsotg->dev, " xfer_started: %d\n", chan->xfer_started);
652 dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
653 dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
654 dev_dbg(hsotg->dev, " xfer_dma: %08lx\n",
655 (unsigned long)chan->xfer_dma);
656 dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
657 dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
658 dev_dbg(hsotg->dev, " NP inactive sched:\n");
659 list_for_each_entry(qh, &hsotg->non_periodic_sched_inactive,
660 qh_list_entry)
661 dev_dbg(hsotg->dev, " %p\n", qh);
Douglas Anderson38d2b5f2017-12-12 10:30:31 -0800662 dev_dbg(hsotg->dev, " NP waiting sched:\n");
663 list_for_each_entry(qh, &hsotg->non_periodic_sched_waiting,
664 qh_list_entry)
665 dev_dbg(hsotg->dev, " %p\n", qh);
Paul Zimmerman7359d482013-03-11 17:47:59 -0700666 dev_dbg(hsotg->dev, " NP active sched:\n");
667 list_for_each_entry(qh, &hsotg->non_periodic_sched_active,
668 qh_list_entry)
669 dev_dbg(hsotg->dev, " %p\n", qh);
670 dev_dbg(hsotg->dev, " Channels:\n");
671 for (i = 0; i < num_channels; i++) {
672 struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
673
674 dev_dbg(hsotg->dev, " %2d: %p\n", i, chan);
675 }
676#endif /* VERBOSE_DEBUG */
677}
678
Razmik Karapetyan4411beb2016-11-16 15:34:04 -0800679static int _dwc2_hcd_start(struct usb_hcd *hcd);
680
681static void dwc2_host_start(struct dwc2_hsotg *hsotg)
682{
683 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
684
685 hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
686 _dwc2_hcd_start(hcd);
687}
688
689static void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
690{
691 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
692
693 hcd->self.is_b_host = 0;
694}
695
696static void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
697 int *hub_addr, int *hub_port)
698{
699 struct urb *urb = context;
700
701 if (urb->dev->tt)
702 *hub_addr = urb->dev->tt->hub->devnum;
703 else
704 *hub_addr = 0;
705 *hub_port = urb->dev->ttport;
706}
707
Paul Zimmerman7359d482013-03-11 17:47:59 -0700708/*
John Younb02038fa2016-02-23 19:55:00 -0800709 * =========================================================================
710 * Low Level Host Channel Access Functions
711 * =========================================================================
712 */
713
714static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
715 struct dwc2_host_chan *chan)
716{
717 u32 hcintmsk = HCINTMSK_CHHLTD;
718
719 switch (chan->ep_type) {
720 case USB_ENDPOINT_XFER_CONTROL:
721 case USB_ENDPOINT_XFER_BULK:
722 dev_vdbg(hsotg->dev, "control/bulk\n");
723 hcintmsk |= HCINTMSK_XFERCOMPL;
724 hcintmsk |= HCINTMSK_STALL;
725 hcintmsk |= HCINTMSK_XACTERR;
726 hcintmsk |= HCINTMSK_DATATGLERR;
727 if (chan->ep_is_in) {
728 hcintmsk |= HCINTMSK_BBLERR;
729 } else {
730 hcintmsk |= HCINTMSK_NAK;
731 hcintmsk |= HCINTMSK_NYET;
732 if (chan->do_ping)
733 hcintmsk |= HCINTMSK_ACK;
734 }
735
736 if (chan->do_split) {
737 hcintmsk |= HCINTMSK_NAK;
738 if (chan->complete_split)
739 hcintmsk |= HCINTMSK_NYET;
740 else
741 hcintmsk |= HCINTMSK_ACK;
742 }
743
744 if (chan->error_state)
745 hcintmsk |= HCINTMSK_ACK;
746 break;
747
748 case USB_ENDPOINT_XFER_INT:
749 if (dbg_perio())
750 dev_vdbg(hsotg->dev, "intr\n");
751 hcintmsk |= HCINTMSK_XFERCOMPL;
752 hcintmsk |= HCINTMSK_NAK;
753 hcintmsk |= HCINTMSK_STALL;
754 hcintmsk |= HCINTMSK_XACTERR;
755 hcintmsk |= HCINTMSK_DATATGLERR;
756 hcintmsk |= HCINTMSK_FRMOVRUN;
757
758 if (chan->ep_is_in)
759 hcintmsk |= HCINTMSK_BBLERR;
760 if (chan->error_state)
761 hcintmsk |= HCINTMSK_ACK;
762 if (chan->do_split) {
763 if (chan->complete_split)
764 hcintmsk |= HCINTMSK_NYET;
765 else
766 hcintmsk |= HCINTMSK_ACK;
767 }
768 break;
769
770 case USB_ENDPOINT_XFER_ISOC:
771 if (dbg_perio())
772 dev_vdbg(hsotg->dev, "isoc\n");
773 hcintmsk |= HCINTMSK_XFERCOMPL;
774 hcintmsk |= HCINTMSK_FRMOVRUN;
775 hcintmsk |= HCINTMSK_ACK;
776
777 if (chan->ep_is_in) {
778 hcintmsk |= HCINTMSK_XACTERR;
779 hcintmsk |= HCINTMSK_BBLERR;
780 }
781 break;
782 default:
783 dev_err(hsotg->dev, "## Unknown EP type ##\n");
784 break;
785 }
786
787 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
788 if (dbg_hc(chan))
789 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
790}
791
792static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
793 struct dwc2_host_chan *chan)
794{
795 u32 hcintmsk = HCINTMSK_CHHLTD;
796
797 /*
798 * For Descriptor DMA mode core halts the channel on AHB error.
799 * Interrupt is not required.
800 */
John Youn95832c02017-01-23 14:57:26 -0800801 if (!hsotg->params.dma_desc_enable) {
John Younb02038fa2016-02-23 19:55:00 -0800802 if (dbg_hc(chan))
803 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
804 hcintmsk |= HCINTMSK_AHBERR;
805 } else {
806 if (dbg_hc(chan))
807 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
808 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
809 hcintmsk |= HCINTMSK_XFERCOMPL;
810 }
811
812 if (chan->error_state && !chan->do_split &&
813 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
814 if (dbg_hc(chan))
815 dev_vdbg(hsotg->dev, "setting ACK\n");
816 hcintmsk |= HCINTMSK_ACK;
817 if (chan->ep_is_in) {
818 hcintmsk |= HCINTMSK_DATATGLERR;
819 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
820 hcintmsk |= HCINTMSK_NAK;
821 }
822 }
823
824 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
825 if (dbg_hc(chan))
826 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
827}
828
829static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
830 struct dwc2_host_chan *chan)
831{
832 u32 intmsk;
833
John Youn95832c02017-01-23 14:57:26 -0800834 if (hsotg->params.host_dma) {
John Younb02038fa2016-02-23 19:55:00 -0800835 if (dbg_hc(chan))
836 dev_vdbg(hsotg->dev, "DMA enabled\n");
837 dwc2_hc_enable_dma_ints(hsotg, chan);
838 } else {
839 if (dbg_hc(chan))
840 dev_vdbg(hsotg->dev, "DMA disabled\n");
841 dwc2_hc_enable_slave_ints(hsotg, chan);
842 }
843
844 /* Enable the top level host channel interrupt */
845 intmsk = dwc2_readl(hsotg->regs + HAINTMSK);
846 intmsk |= 1 << chan->hc_num;
847 dwc2_writel(intmsk, hsotg->regs + HAINTMSK);
848 if (dbg_hc(chan))
849 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
850
851 /* Make sure host channel interrupts are enabled */
852 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
853 intmsk |= GINTSTS_HCHINT;
854 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
855 if (dbg_hc(chan))
856 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
857}
858
859/**
860 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
861 * a specific endpoint
862 *
863 * @hsotg: Programming view of DWC_otg controller
864 * @chan: Information needed to initialize the host channel
865 *
866 * The HCCHARn register is set up with the characteristics specified in chan.
867 * Host channel interrupts that may need to be serviced while this transfer is
868 * in progress are enabled.
869 */
870static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
871{
872 u8 hc_num = chan->hc_num;
873 u32 hcintmsk;
874 u32 hcchar;
875 u32 hcsplt = 0;
876
877 if (dbg_hc(chan))
878 dev_vdbg(hsotg->dev, "%s()\n", __func__);
879
880 /* Clear old interrupt conditions for this host channel */
881 hcintmsk = 0xffffffff;
882 hcintmsk &= ~HCINTMSK_RESERVED14_31;
883 dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num));
884
885 /* Enable channel interrupts required for this transfer */
886 dwc2_hc_enable_ints(hsotg, chan);
887
888 /*
889 * Program the HCCHARn register with the endpoint characteristics for
890 * the current transfer
891 */
892 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
893 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
894 if (chan->ep_is_in)
895 hcchar |= HCCHAR_EPDIR;
896 if (chan->speed == USB_SPEED_LOW)
897 hcchar |= HCCHAR_LSPDDEV;
898 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
899 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
900 dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num));
901 if (dbg_hc(chan)) {
902 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
903 hc_num, hcchar);
904
905 dev_vdbg(hsotg->dev, "%s: Channel %d\n",
906 __func__, hc_num);
907 dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
908 chan->dev_addr);
909 dev_vdbg(hsotg->dev, " Ep Num: %d\n",
910 chan->ep_num);
911 dev_vdbg(hsotg->dev, " Is In: %d\n",
912 chan->ep_is_in);
913 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
914 chan->speed == USB_SPEED_LOW);
915 dev_vdbg(hsotg->dev, " Ep Type: %d\n",
916 chan->ep_type);
917 dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
918 chan->max_packet);
919 }
920
921 /* Program the HCSPLT register for SPLITs */
922 if (chan->do_split) {
923 if (dbg_hc(chan))
924 dev_vdbg(hsotg->dev,
925 "Programming HC %d with split --> %s\n",
926 hc_num,
927 chan->complete_split ? "CSPLIT" : "SSPLIT");
928 if (chan->complete_split)
929 hcsplt |= HCSPLT_COMPSPLT;
930 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
931 HCSPLT_XACTPOS_MASK;
932 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
933 HCSPLT_HUBADDR_MASK;
934 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
935 HCSPLT_PRTADDR_MASK;
936 if (dbg_hc(chan)) {
937 dev_vdbg(hsotg->dev, " comp split %d\n",
938 chan->complete_split);
939 dev_vdbg(hsotg->dev, " xact pos %d\n",
940 chan->xact_pos);
941 dev_vdbg(hsotg->dev, " hub addr %d\n",
942 chan->hub_addr);
943 dev_vdbg(hsotg->dev, " hub port %d\n",
944 chan->hub_port);
945 dev_vdbg(hsotg->dev, " is_in %d\n",
946 chan->ep_is_in);
947 dev_vdbg(hsotg->dev, " Max Pkt %d\n",
948 chan->max_packet);
949 dev_vdbg(hsotg->dev, " xferlen %d\n",
950 chan->xfer_len);
951 }
952 }
953
954 dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
955}
956
957/**
958 * dwc2_hc_halt() - Attempts to halt a host channel
959 *
960 * @hsotg: Controller register interface
961 * @chan: Host channel to halt
962 * @halt_status: Reason for halting the channel
963 *
964 * This function should only be called in Slave mode or to abort a transfer in
965 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
966 * controller halts the channel when the transfer is complete or a condition
967 * occurs that requires application intervention.
968 *
969 * In slave mode, checks for a free request queue entry, then sets the Channel
970 * Enable and Channel Disable bits of the Host Channel Characteristics
 971 * register of the specified channel to initiate the halt. If there is no free
972 * request queue entry, sets only the Channel Disable bit of the HCCHARn
973 * register to flush requests for this channel. In the latter case, sets a
974 * flag to indicate that the host channel needs to be halted when a request
975 * queue slot is open.
976 *
977 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
978 * HCCHARn register. The controller ensures there is space in the request
979 * queue before submitting the halt request.
980 *
981 * Some time may elapse before the core flushes any posted requests for this
982 * host channel and halts. The Channel Halted interrupt handler completes the
983 * deactivation of the host channel.
984 */
985void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
986 enum dwc2_halt_status halt_status)
987{
988 u32 nptxsts, hptxsts, hcchar;
989
990 if (dbg_hc(chan))
991 dev_vdbg(hsotg->dev, "%s()\n", __func__);
992 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
993 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
994
995 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
996 halt_status == DWC2_HC_XFER_AHB_ERR) {
997 /*
998 * Disable all channel interrupts except Ch Halted. The QTD
999 * and QH state associated with this transfer has been cleared
1000 * (in the case of URB_DEQUEUE), so the channel needs to be
1001 * shut down carefully to prevent crashes.
1002 */
1003 u32 hcintmsk = HCINTMSK_CHHLTD;
1004
1005 dev_vdbg(hsotg->dev, "dequeue/error\n");
1006 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
1007
1008 /*
1009 * Make sure no other interrupts besides halt are currently
1010 * pending. Handling another interrupt could cause a crash due
1011 * to the QTD and QH state.
1012 */
1013 dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1014
1015 /*
1016 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1017 * even if the channel was already halted for some other
1018 * reason
1019 */
1020 chan->halt_status = halt_status;
1021
1022 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
1023 if (!(hcchar & HCCHAR_CHENA)) {
1024 /*
1025 * The channel is either already halted or it hasn't
1026 * started yet. In DMA mode, the transfer may halt if
1027 * it finishes normally or a condition occurs that
1028 * requires driver intervention. Don't want to halt
1029 * the channel again. In either Slave or DMA mode,
1030 * it's possible that the transfer has been assigned
1031 * to a channel, but not started yet when an URB is
1032 * dequeued. Don't want to halt a channel that hasn't
1033 * started yet.
1034 */
1035 return;
1036 }
1037 }
1038 if (chan->halt_pending) {
1039 /*
1040 * A halt has already been issued for this channel. This might
1041 * happen when a transfer is aborted by a higher level in
1042 * the stack.
1043 */
1044 dev_vdbg(hsotg->dev,
1045 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1046 __func__, chan->hc_num);
1047 return;
1048 }
1049
1050 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
1051
1052 /* No need to set the bit in DDMA for disabling the channel */
1053 /* TODO check it everywhere channel is disabled */
John Youn95832c02017-01-23 14:57:26 -08001054 if (!hsotg->params.dma_desc_enable) {
John Younb02038fa2016-02-23 19:55:00 -08001055 if (dbg_hc(chan))
1056 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1057 hcchar |= HCCHAR_CHENA;
1058 } else {
1059 if (dbg_hc(chan))
1060 dev_dbg(hsotg->dev, "desc DMA enabled\n");
1061 }
1062 hcchar |= HCCHAR_CHDIS;
1063
John Youn95832c02017-01-23 14:57:26 -08001064 if (!hsotg->params.host_dma) {
John Younb02038fa2016-02-23 19:55:00 -08001065 if (dbg_hc(chan))
1066 dev_vdbg(hsotg->dev, "DMA not enabled\n");
1067 hcchar |= HCCHAR_CHENA;
1068
1069 /* Check for space in the request queue to issue the halt */
1070 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1071 chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1072 dev_vdbg(hsotg->dev, "control/bulk\n");
1073 nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
1074 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1075 dev_vdbg(hsotg->dev, "Disabling channel\n");
1076 hcchar &= ~HCCHAR_CHENA;
1077 }
1078 } else {
1079 if (dbg_perio())
1080 dev_vdbg(hsotg->dev, "isoc/intr\n");
1081 hptxsts = dwc2_readl(hsotg->regs + HPTXSTS);
1082 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1083 hsotg->queuing_high_bandwidth) {
1084 if (dbg_perio())
1085 dev_vdbg(hsotg->dev, "Disabling channel\n");
1086 hcchar &= ~HCCHAR_CHENA;
1087 }
1088 }
1089 } else {
1090 if (dbg_hc(chan))
1091 dev_vdbg(hsotg->dev, "DMA enabled\n");
1092 }
1093
1094 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1095 chan->halt_status = halt_status;
1096
1097 if (hcchar & HCCHAR_CHENA) {
1098 if (dbg_hc(chan))
1099 dev_vdbg(hsotg->dev, "Channel enabled\n");
1100 chan->halt_pending = 1;
1101 chan->halt_on_queue = 0;
1102 } else {
1103 if (dbg_hc(chan))
1104 dev_vdbg(hsotg->dev, "Channel disabled\n");
1105 chan->halt_on_queue = 1;
1106 }
1107
1108 if (dbg_hc(chan)) {
1109 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1110 chan->hc_num);
1111 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
1112 hcchar);
1113 dev_vdbg(hsotg->dev, " halt_pending: %d\n",
1114 chan->halt_pending);
1115 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
1116 chan->halt_on_queue);
1117 dev_vdbg(hsotg->dev, " halt_status: %d\n",
1118 chan->halt_status);
1119 }
1120}
1121
1122/**
1123 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1124 *
1125 * @hsotg: Programming view of DWC_otg controller
1126 * @chan: Identifies the host channel to clean up
1127 *
1128 * This function is normally called after a transfer is done and the host
1129 * channel is being released
1130 */
1131void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1132{
1133 u32 hcintmsk;
1134
1135 chan->xfer_started = 0;
1136
1137 list_del_init(&chan->split_order_list_entry);
1138
1139 /*
1140 * Clear channel interrupt enables and any unhandled channel interrupt
1141 * conditions
1142 */
1143 dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1144 hcintmsk = 0xffffffff;
1145 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1146 dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1147}
1148
1149/**
1150 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1151 * which frame a periodic transfer should occur
1152 *
1153 * @hsotg: Programming view of DWC_otg controller
1154 * @chan: Identifies the host channel to set up and its properties
1155 * @hcchar: Current value of the HCCHAR register for the specified host channel
1156 *
1157 * This function has no effect on non-periodic transfers
1158 */
1159static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1160 struct dwc2_host_chan *chan, u32 *hcchar)
1161{
1162 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1163 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1164 int host_speed;
1165 int xfer_ns;
1166 int xfer_us;
1167 int bytes_in_fifo;
1168 u16 fifo_space;
1169 u16 frame_number;
1170 u16 wire_frame;
1171
1172 /*
1173 * Try to figure out if we're an even or odd frame. If we set
 1174 * even and the current frame number is even then the transfer
 1175 * will happen immediately. Similarly if both are odd. If one is
1176 * even and the other is odd then the transfer will happen when
1177 * the frame number ticks.
1178 *
1179 * There's a bit of a balancing act to get this right.
 1180 * Sometimes we may want to send data in the current frame (AKA
1181 * right away). We might want to do this if the frame number
1182 * _just_ ticked, but we might also want to do this in order
1183 * to continue a split transaction that happened late in a
1184 * microframe (so we didn't know to queue the next transfer
1185 * until the frame number had ticked). The problem is that we
1186 * need a lot of knowledge to know if there's actually still
1187 * time to send things or if it would be better to wait until
1188 * the next frame.
1189 *
1190 * We can look at how much time is left in the current frame
1191 * and make a guess about whether we'll have time to transfer.
1192 * We'll do that.
1193 */
1194
1195 /* Get speed host is running at */
1196 host_speed = (chan->speed != USB_SPEED_HIGH &&
1197 !chan->do_split) ? chan->speed : USB_SPEED_HIGH;
1198
1199 /* See how many bytes are in the periodic FIFO right now */
1200 fifo_space = (dwc2_readl(hsotg->regs + HPTXSTS) &
1201 TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
1202 bytes_in_fifo = sizeof(u32) *
John Younbea8e862016-11-03 17:55:53 -07001203 (hsotg->params.host_perio_tx_fifo_size -
John Younb02038fa2016-02-23 19:55:00 -08001204 fifo_space);
1205
1206 /*
1207 * Roughly estimate bus time for everything in the periodic
1208 * queue + our new transfer. This is "rough" because we're
 1209 * using a function that takes into account IN/OUT
1210 * and INT/ISO and we're just slamming in one value for all
1211 * transfers. This should be an over-estimate and that should
1212 * be OK, but we can probably tighten it.
1213 */
1214 xfer_ns = usb_calc_bus_time(host_speed, false, false,
1215 chan->xfer_len + bytes_in_fifo);
1216 xfer_us = NS_TO_US(xfer_ns);
1217
1218 /* See what frame number we'll be at by the time we finish */
1219 frame_number = dwc2_hcd_get_future_frame_number(hsotg, xfer_us);
1220
1221 /* This is when we were scheduled to be on the wire */
1222 wire_frame = dwc2_frame_num_inc(chan->qh->next_active_frame, 1);
1223
1224 /*
1225 * If we'd finish _after_ the frame we're scheduled in then
1226 * it's hopeless. Just schedule right away and hope for the
1227 * best. Note that it _might_ be wise to call back into the
1228 * scheduler to pick a better frame, but this is better than
1229 * nothing.
1230 */
1231 if (dwc2_frame_num_gt(frame_number, wire_frame)) {
1232 dwc2_sch_vdbg(hsotg,
1233 "QH=%p EO MISS fr=%04x=>%04x (%+d)\n",
1234 chan->qh, wire_frame, frame_number,
1235 dwc2_frame_num_dec(frame_number,
1236 wire_frame));
1237 wire_frame = frame_number;
1238
1239 /*
1240 * We picked a different frame number; communicate this
1241 * back to the scheduler so it doesn't try to schedule
1242 * another in the same frame.
1243 *
1244 * Remember that next_active_frame is 1 before the wire
1245 * frame.
1246 */
1247 chan->qh->next_active_frame =
1248 dwc2_frame_num_dec(frame_number, 1);
1249 }
1250
1251 if (wire_frame & 1)
1252 *hcchar |= HCCHAR_ODDFRM;
1253 else
1254 *hcchar &= ~HCCHAR_ODDFRM;
1255 }
1256}
1257
1258static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1259{
1260 /* Set up the initial PID for the transfer */
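	/*
	 * For high-bandwidth high-speed isochronous endpoints the starting
	 * PID is chosen from the number of transactions per (micro)frame
	 * (multi_count) following the USB 2.0 data PID sequencing rules;
	 * all other cases start with DATA0.
	 */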
1261 if (chan->speed == USB_SPEED_HIGH) {
1262 if (chan->ep_is_in) {
1263 if (chan->multi_count == 1)
1264 chan->data_pid_start = DWC2_HC_PID_DATA0;
1265 else if (chan->multi_count == 2)
1266 chan->data_pid_start = DWC2_HC_PID_DATA1;
1267 else
1268 chan->data_pid_start = DWC2_HC_PID_DATA2;
1269 } else {
1270 if (chan->multi_count == 1)
1271 chan->data_pid_start = DWC2_HC_PID_DATA0;
1272 else
1273 chan->data_pid_start = DWC2_HC_PID_MDATA;
1274 }
1275 } else {
1276 chan->data_pid_start = DWC2_HC_PID_DATA0;
1277 }
1278}
1279
1280/**
1281 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1282 * the Host Channel
1283 *
1284 * @hsotg: Programming view of DWC_otg controller
1285 * @chan: Information needed to initialize the host channel
1286 *
1287 * This function should only be called in Slave mode. For a channel associated
1288 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1289 * associated with a periodic EP, the periodic Tx FIFO is written.
1290 *
1291 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1292 * the number of bytes written to the Tx FIFO.
1293 */
1294static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1295 struct dwc2_host_chan *chan)
1296{
1297 u32 i;
1298 u32 remaining_count;
1299 u32 byte_count;
1300 u32 dword_count;
1301 u32 __iomem *data_fifo;
1302 u32 *data_buf = (u32 *)chan->xfer_buf;
1303
1304 if (dbg_hc(chan))
1305 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1306
1307 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1308
1309 remaining_count = chan->xfer_len - chan->xfer_count;
1310 if (remaining_count > chan->max_packet)
1311 byte_count = chan->max_packet;
1312 else
1313 byte_count = remaining_count;
1314
1315 dword_count = (byte_count + 3) / 4;
1316
1317 if (((unsigned long)data_buf & 0x3) == 0) {
1318 /* xfer_buf is DWORD aligned */
1319 for (i = 0; i < dword_count; i++, data_buf++)
1320 dwc2_writel(*data_buf, data_fifo);
1321 } else {
1322 /* xfer_buf is not DWORD aligned */
1323 for (i = 0; i < dword_count; i++, data_buf++) {
1324 u32 data = data_buf[0] | data_buf[1] << 8 |
1325 data_buf[2] << 16 | data_buf[3] << 24;
1326 dwc2_writel(data, data_fifo);
1327 }
1328 }
1329
1330 chan->xfer_count += byte_count;
1331 chan->xfer_buf += byte_count;
1332}
1333
1334/**
1335 * dwc2_hc_do_ping() - Starts a PING transfer
1336 *
1337 * @hsotg: Programming view of DWC_otg controller
1338 * @chan: Information needed to initialize the host channel
1339 *
1340 * This function should only be called in Slave mode. The Do Ping bit is set in
1341 * the HCTSIZ register, then the channel is enabled.
1342 */
1343static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
1344 struct dwc2_host_chan *chan)
1345{
1346 u32 hcchar;
1347 u32 hctsiz;
1348
1349 if (dbg_hc(chan))
1350 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1351 chan->hc_num);
1352
1353 hctsiz = TSIZ_DOPNG;
1354 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1355 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1356
1357 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
1358 hcchar |= HCCHAR_CHENA;
1359 hcchar &= ~HCCHAR_CHDIS;
1360 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1361}
1362
1363/**
1364 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1365 * channel and starts the transfer
1366 *
1367 * @hsotg: Programming view of DWC_otg controller
1368 * @chan: Information needed to initialize the host channel. The xfer_len value
1369 * may be reduced to accommodate the max widths of the XferSize and
1370 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1371 * changed to reflect the final xfer_len value.
1372 *
1373 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1374 * the caller must ensure that there is sufficient space in the request queue
1375 * and Tx Data FIFO.
1376 *
1377 * For an OUT transfer in Slave mode, it loads a data packet into the
1378 * appropriate FIFO. If necessary, additional data packets are loaded in the
1379 * Host ISR.
1380 *
1381 * For an IN transfer in Slave mode, a data packet is requested. The data
1382 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1383 * additional data packets are requested in the Host ISR.
1384 *
1385 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1386 * register along with a packet count of 1 and the channel is enabled. This
1387 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1388 * simply set to 0 since no data transfer occurs in this case.
1389 *
1390 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1391 * all the information required to perform the subsequent data transfer. In
1392 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1393 * controller performs the entire PING protocol, then starts the data
1394 * transfer.
1395 */
1396static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1397 struct dwc2_host_chan *chan)
1398{
John Younbea8e862016-11-03 17:55:53 -07001399 u32 max_hc_xfer_size = hsotg->params.max_transfer_size;
1400 u16 max_hc_pkt_count = hsotg->params.max_packet_count;
John Younb02038fa2016-02-23 19:55:00 -08001401 u32 hcchar;
1402 u32 hctsiz = 0;
1403 u16 num_packets;
1404 u32 ec_mc;
1405
1406 if (dbg_hc(chan))
1407 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1408
1409 if (chan->do_ping) {
John Youn95832c02017-01-23 14:57:26 -08001410 if (!hsotg->params.host_dma) {
John Younb02038fa2016-02-23 19:55:00 -08001411 if (dbg_hc(chan))
1412 dev_vdbg(hsotg->dev, "ping, no DMA\n");
1413 dwc2_hc_do_ping(hsotg, chan);
1414 chan->xfer_started = 1;
1415 return;
1416 }
1417
1418 if (dbg_hc(chan))
1419 dev_vdbg(hsotg->dev, "ping, DMA\n");
1420
1421 hctsiz |= TSIZ_DOPNG;
1422 }
1423
1424 if (chan->do_split) {
1425 if (dbg_hc(chan))
1426 dev_vdbg(hsotg->dev, "split\n");
1427 num_packets = 1;
1428
1429 if (chan->complete_split && !chan->ep_is_in)
1430 /*
1431 * For CSPLIT OUT Transfer, set the size to 0 so the
1432 * core doesn't expect any data written to the FIFO
1433 */
1434 chan->xfer_len = 0;
1435 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1436 chan->xfer_len = chan->max_packet;
1437 else if (!chan->ep_is_in && chan->xfer_len > 188)
1438 chan->xfer_len = 188;
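		/*
		 * The 188 byte cap on SSPLIT OUT payloads roughly matches one
		 * microframe's worth of full-speed bandwidth: a 12 Mbit/s bus
		 * carries at most 1500 bytes per 1 ms frame, i.e. 187.5 bytes
		 * per 125 us microframe, rounded up to 188.
		 */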
1439
1440 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1441 TSIZ_XFERSIZE_MASK;
1442
1443 /* For split set ec_mc for immediate retries */
1444 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1445 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1446 ec_mc = 3;
1447 else
1448 ec_mc = 1;
1449 } else {
1450 if (dbg_hc(chan))
1451 dev_vdbg(hsotg->dev, "no split\n");
1452 /*
1453 * Ensure that the transfer length and packet count will fit
1454 * in the widths allocated for them in the HCTSIZn register
1455 */
1456 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1457 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1458 /*
1459 * Make sure the transfer size is no larger than one
1460 * (micro)frame's worth of data. (A check was done
1461 * when the periodic transfer was accepted to ensure
1462 * that a (micro)frame's worth of data can be
1463 * programmed into a channel.)
1464 */
1465 u32 max_periodic_len =
1466 chan->multi_count * chan->max_packet;
1467
1468 if (chan->xfer_len > max_periodic_len)
1469 chan->xfer_len = max_periodic_len;
1470 } else if (chan->xfer_len > max_hc_xfer_size) {
1471 /*
1472 * Make sure that xfer_len is a multiple of max packet
1473 * size
1474 */
1475 chan->xfer_len =
1476 max_hc_xfer_size - chan->max_packet + 1;
1477 }
1478
1479 if (chan->xfer_len > 0) {
1480 num_packets = (chan->xfer_len + chan->max_packet - 1) /
1481 chan->max_packet;
1482 if (num_packets > max_hc_pkt_count) {
1483 num_packets = max_hc_pkt_count;
1484 chan->xfer_len = num_packets * chan->max_packet;
1485 }
1486 } else {
1487 /* Need 1 packet for transfer length of 0 */
1488 num_packets = 1;
1489 }
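		/*
		 * Illustrative example: a 10000 byte bulk IN with a 512 byte
		 * max packet gives num_packets = 20 here, and the IN
		 * adjustment below then rounds xfer_len up to 20 * 512 = 10240.
		 */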
1490
1491 if (chan->ep_is_in)
1492 /*
1493 * Always program an integral # of max packets for IN
1494 * transfers
1495 */
1496 chan->xfer_len = num_packets * chan->max_packet;
1497
1498 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1499 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1500 /*
1501 * Make sure that the multi_count field matches the
1502 * actual transfer length
1503 */
1504 chan->multi_count = num_packets;
1505
1506 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1507 dwc2_set_pid_isoc(chan);
1508
1509 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1510 TSIZ_XFERSIZE_MASK;
1511
1512 /* The ec_mc gets the multi_count for non-split */
1513 ec_mc = chan->multi_count;
1514 }
1515
1516 chan->start_pkt_count = num_packets;
1517 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1518 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1519 TSIZ_SC_MC_PID_MASK;
1520 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1521 if (dbg_hc(chan)) {
1522 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1523 hctsiz, chan->hc_num);
1524
1525 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1526 chan->hc_num);
1527 dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
1528 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1529 TSIZ_XFERSIZE_SHIFT);
1530 dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
1531 (hctsiz & TSIZ_PKTCNT_MASK) >>
1532 TSIZ_PKTCNT_SHIFT);
1533 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1534 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1535 TSIZ_SC_MC_PID_SHIFT);
1536 }
1537
John Youn95832c02017-01-23 14:57:26 -08001538 if (hsotg->params.host_dma) {
John Younb02038fa2016-02-23 19:55:00 -08001539 dwc2_writel((u32)chan->xfer_dma,
1540 hsotg->regs + HCDMA(chan->hc_num));
1541 if (dbg_hc(chan))
1542 dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1543 (unsigned long)chan->xfer_dma, chan->hc_num);
1544 }
1545
1546 /* Start the split */
1547 if (chan->do_split) {
1548 u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));
1549
1550 hcsplt |= HCSPLT_SPLTENA;
1551 dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
1552 }
1553
1554 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
1555 hcchar &= ~HCCHAR_MULTICNT_MASK;
1556 hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
1557 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1558
1559 if (hcchar & HCCHAR_CHDIS)
1560 dev_warn(hsotg->dev,
1561 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1562 __func__, chan->hc_num, hcchar);
1563
1564 /* Set host channel enable after all other setup is complete */
1565 hcchar |= HCCHAR_CHENA;
1566 hcchar &= ~HCCHAR_CHDIS;
1567
1568 if (dbg_hc(chan))
1569 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1570 (hcchar & HCCHAR_MULTICNT_MASK) >>
1571 HCCHAR_MULTICNT_SHIFT);
1572
1573 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1574 if (dbg_hc(chan))
1575 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1576 chan->hc_num);
1577
1578 chan->xfer_started = 1;
1579 chan->requests++;
1580
John Youn95832c02017-01-23 14:57:26 -08001581 if (!hsotg->params.host_dma &&
John Younb02038fa2016-02-23 19:55:00 -08001582 !chan->ep_is_in && chan->xfer_len > 0)
1583 /* Load OUT packet into the appropriate Tx FIFO */
1584 dwc2_hc_write_packet(hsotg, chan);
1585}
1586
1587/**
1588 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1589 * host channel and starts the transfer in Descriptor DMA mode
1590 *
1591 * @hsotg: Programming view of DWC_otg controller
1592 * @chan: Information needed to initialize the host channel
1593 *
1594 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1595 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1596 * with micro-frame bitmap.
1597 *
1598 * Initializes HCDMA register with descriptor list address and CTD value then
1599 * starts the transfer via enabling the channel.
1600 */
1601void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1602 struct dwc2_host_chan *chan)
1603{
1604 u32 hcchar;
1605 u32 hctsiz = 0;
1606
1607 if (chan->do_ping)
1608 hctsiz |= TSIZ_DOPNG;
1609
1610 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1611 dwc2_set_pid_isoc(chan);
1612
1613 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1614 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1615 TSIZ_SC_MC_PID_MASK;
1616
1617 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1618 hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1619
1620 /* Non-zero only for high-speed interrupt endpoints */
1621 hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1622
1623 if (dbg_hc(chan)) {
1624 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1625 chan->hc_num);
1626 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1627 chan->data_pid_start);
1628 dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
1629 }
1630
1631 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1632
1633 dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
1634 chan->desc_list_sz, DMA_TO_DEVICE);
1635
1636 dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num));
1637
1638 if (dbg_hc(chan))
1639 dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
1640 &chan->desc_list_addr, chan->hc_num);
1641
1642 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
1643 hcchar &= ~HCCHAR_MULTICNT_MASK;
1644 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1645 HCCHAR_MULTICNT_MASK;
1646
1647 if (hcchar & HCCHAR_CHDIS)
1648 dev_warn(hsotg->dev,
1649 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1650 __func__, chan->hc_num, hcchar);
1651
1652 /* Set host channel enable after all other setup is complete */
1653 hcchar |= HCCHAR_CHENA;
1654 hcchar &= ~HCCHAR_CHDIS;
1655
1656 if (dbg_hc(chan))
1657 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1658 (hcchar & HCCHAR_MULTICNT_MASK) >>
1659 HCCHAR_MULTICNT_SHIFT);
1660
1661 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1662 if (dbg_hc(chan))
1663 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1664 chan->hc_num);
1665
1666 chan->xfer_started = 1;
1667 chan->requests++;
1668}
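
/*
 * An illustrative reading of the descriptor-DMA programming above (the
 * numbers are made up): with chan->ntd = 4 the NTD field is written as
 * 4 - 1 = 3, chan->schinfo supplies the microframe bitmap only for
 * high-speed interrupt endpoints, and HCDMA is pointed at the descriptor
 * list itself rather than at a data buffer, which is the key difference
 * from the buffer-DMA path in dwc2_hc_start_transfer() above.
 */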
1669
1670/**
1671 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1672 * a previous call to dwc2_hc_start_transfer()
1673 *
1674 * @hsotg: Programming view of DWC_otg controller
1675 * @chan: Information needed to initialize the host channel
1676 *
1677 * The caller must ensure there is sufficient space in the request queue and Tx
1678 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1679 * the controller acts autonomously to complete transfers programmed to a host
1680 * channel.
1681 *
1682 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1683 * if there is any data remaining to be queued. For an IN transfer, another
1684 * data packet is always requested. For the SETUP phase of a control transfer,
1685 * this function does nothing.
1686 *
1687 * Return: 1 if a new request is queued, 0 if no more requests are required
1688 * for this transfer
1689 */
1690static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1691 struct dwc2_host_chan *chan)
1692{
1693 if (dbg_hc(chan))
1694 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1695 chan->hc_num);
1696
1697 if (chan->do_split)
1698 /* SPLITs always queue just once per channel */
1699 return 0;
1700
1701 if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1702 /* SETUPs are queued only once since they can't be NAK'd */
1703 return 0;
1704
1705 if (chan->ep_is_in) {
1706 /*
1707 * Always queue another request for other IN transfers. If
1708 * back-to-back INs are issued and NAKs are received for both,
1709 * the driver may still be processing the first NAK when the
1710 * second NAK is received. When the interrupt handler clears
1711 * the NAK interrupt for the first NAK, the second NAK will
1712 * not be seen. So we can't depend on the NAK interrupt
1713 * handler to requeue a NAK'd request. Instead, IN requests
1714 * are issued each time this function is called. When the
1715 * transfer completes, the extra requests for the channel will
1716 * be flushed.
1717 */
1718 u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
1719
1720 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1721 hcchar |= HCCHAR_CHENA;
1722 hcchar &= ~HCCHAR_CHDIS;
1723 if (dbg_hc(chan))
1724 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
1725 hcchar);
1726 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1727 chan->requests++;
1728 return 1;
1729 }
1730
1731 /* OUT transfers */
1732
1733 if (chan->xfer_count < chan->xfer_len) {
1734 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1735 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1736 u32 hcchar = dwc2_readl(hsotg->regs +
1737 HCCHAR(chan->hc_num));
1738
1739 dwc2_hc_set_even_odd_frame(hsotg, chan,
1740 &hcchar);
1741 }
1742
1743 /* Load OUT packet into the appropriate Tx FIFO */
1744 dwc2_hc_write_packet(hsotg, chan);
1745 chan->requests++;
1746 return 1;
1747 }
1748
1749 return 0;
1750}
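
/*
 * A sketch of the caller's side of the contract described above: for OUT
 * and SETUP transactions, dwc2_queue_transaction() later in this file
 * only continues a transfer after checking the FIFO, roughly
 *
 *	if ((fifo_dwords_avail * 4) >= chan->max_packet)
 *		retval = dwc2_hc_continue_transfer(hsotg, chan);
 *
 * so by the time this function runs there is room for at least one more
 * packet. IN requests need no FIFO space and are requeued unconditionally.
 */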
1751
1752/*
1753 * =========================================================================
1754 * HCD
1755 * =========================================================================
1756 */
1757
1758/*
Paul Zimmerman7359d482013-03-11 17:47:59 -07001759 * Processes all the URBs in a single list of QHs. Completes them with
1760 * -ECONNRESET and frees the QTD.
1761 *
1762 * Must be called with interrupt disabled and spinlock held
1763 */
1764static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
1765 struct list_head *qh_list)
1766{
1767 struct dwc2_qh *qh, *qh_tmp;
1768 struct dwc2_qtd *qtd, *qtd_tmp;
1769
1770 list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
1771 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
1772 qtd_list_entry) {
Gregory Herrero2e84da62015-09-22 15:16:53 +02001773 dwc2_host_complete(hsotg, qtd, -ECONNRESET);
Paul Zimmerman0d012b92013-07-13 14:53:48 -07001774 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001775 }
1776 }
1777}
1778
1779static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
1780 struct list_head *qh_list)
1781{
1782 struct dwc2_qtd *qtd, *qtd_tmp;
1783 struct dwc2_qh *qh, *qh_tmp;
1784 unsigned long flags;
1785
1786 if (!qh_list->next)
1787 /* The list hasn't been initialized yet */
1788 return;
1789
1790 spin_lock_irqsave(&hsotg->lock, flags);
1791
1792 /* Ensure there are no QTDs or URBs left */
1793 dwc2_kill_urbs_in_qh_list(hsotg, qh_list);
1794
1795 list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
1796 dwc2_hcd_qh_unlink(hsotg, qh);
1797
1798 /* Free each QTD in the QH's QTD list */
1799 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
1800 qtd_list_entry)
1801 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1802
Douglas Anderson16e80212016-01-28 18:19:55 -08001803 if (qh->channel && qh->channel->qh == qh)
1804 qh->channel->qh = NULL;
1805
Paul Zimmerman7359d482013-03-11 17:47:59 -07001806 spin_unlock_irqrestore(&hsotg->lock, flags);
1807 dwc2_hcd_qh_free(hsotg, qh);
1808 spin_lock_irqsave(&hsotg->lock, flags);
1809 }
1810
1811 spin_unlock_irqrestore(&hsotg->lock, flags);
1812}
1813
1814/*
1815 * Responds with an error status of -ECONNRESET to all URBs in the non-periodic
1816 * and periodic schedules. The QTD associated with each URB is removed from
1817 * the schedule and freed. This function may be called when a disconnect is
1818 * detected or when the HCD is being stopped.
1819 *
1820 * Must be called with interrupt disabled and spinlock held
1821 */
1822static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg)
1823{
1824 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_inactive);
Douglas Anderson38d2b5f2017-12-12 10:30:31 -08001825 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_waiting);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001826 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->non_periodic_sched_active);
1827 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_inactive);
1828 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_ready);
1829 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_assigned);
1830 dwc2_kill_urbs_in_qh_list(hsotg, &hsotg->periodic_sched_queued);
1831}
1832
1833/**
1834 * dwc2_hcd_start() - Starts the HCD when switching to Host mode
1835 *
1836 * @hsotg: Pointer to struct dwc2_hsotg
1837 */
1838void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
1839{
1840 u32 hprt0;
1841
1842 if (hsotg->op_state == OTG_STATE_B_HOST) {
1843 /*
1844 * Reset the port. During an HNP mode switch the reset
1845 * needs to occur within 1ms and have a duration of at
1846 * least 50ms.
1847 */
1848 hprt0 = dwc2_read_hprt0(hsotg);
1849 hprt0 |= HPRT0_RST;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001850 dwc2_writel(hprt0, hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001851 }
1852
1853 queue_delayed_work(hsotg->wq_otg, &hsotg->start_work,
1854 msecs_to_jiffies(50));
1855}
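
/*
 * The 50 ms delay on the start work above is presumably chosen to match
 * the minimum reset duration mentioned in the comment, so that the
 * deferred start only runs once the port reset asserted here has been
 * held long enough.
 */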
1856
1857/* Must be called with interrupt disabled and spinlock held */
1858static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
1859{
John Younbea8e862016-11-03 17:55:53 -07001860 int num_channels = hsotg->params.host_channels;
Paul Zimmerman7359d482013-03-11 17:47:59 -07001861 struct dwc2_host_chan *channel;
1862 u32 hcchar;
1863 int i;
1864
John Youn95832c02017-01-23 14:57:26 -08001865 if (!hsotg->params.host_dma) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07001866 /* Flush out any channel requests in slave mode */
1867 for (i = 0; i < num_channels; i++) {
1868 channel = hsotg->hc_ptr_array[i];
1869 if (!list_empty(&channel->hc_list_entry))
1870 continue;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001871 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
Paul Zimmerman7359d482013-03-11 17:47:59 -07001872 if (hcchar & HCCHAR_CHENA) {
1873 hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR);
1874 hcchar |= HCCHAR_CHDIS;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001875 dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
Paul Zimmerman7359d482013-03-11 17:47:59 -07001876 }
1877 }
1878 }
1879
1880 for (i = 0; i < num_channels; i++) {
1881 channel = hsotg->hc_ptr_array[i];
1882 if (!list_empty(&channel->hc_list_entry))
1883 continue;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001884 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
Paul Zimmerman7359d482013-03-11 17:47:59 -07001885 if (hcchar & HCCHAR_CHENA) {
1886 /* Halt the channel */
1887 hcchar |= HCCHAR_CHDIS;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001888 dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
Paul Zimmerman7359d482013-03-11 17:47:59 -07001889 }
1890
1891 dwc2_hc_cleanup(hsotg, channel);
1892 list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list);
1893 /*
1894 * Added for Descriptor DMA to prevent channel double cleanup in
1895 * release_channel_ddma(), which is called from ep_disable when
1896 * device disconnects
1897 */
1898 channel->qh = NULL;
1899 }
Vincent Palatin7252f1b2015-03-15 13:24:32 -07001900 /* All channels have been freed, mark them available */
John Youn95832c02017-01-23 14:57:26 -08001901 if (hsotg->params.uframe_sched) {
Vincent Palatin7252f1b2015-03-15 13:24:32 -07001902 hsotg->available_host_channels =
John Younbea8e862016-11-03 17:55:53 -07001903 hsotg->params.host_channels;
Vincent Palatin7252f1b2015-03-15 13:24:32 -07001904 } else {
1905 hsotg->non_periodic_channels = 0;
1906 hsotg->periodic_channels = 0;
1907 }
Paul Zimmerman7359d482013-03-11 17:47:59 -07001908}
1909
1910/**
Douglas Anderson6a659532015-11-19 13:23:14 -08001911 * dwc2_hcd_connect() - Handles connect of the HCD
Paul Zimmerman7359d482013-03-11 17:47:59 -07001912 *
1913 * @hsotg: Pointer to struct dwc2_hsotg
1914 *
1915 * Must be called with interrupt disabled and spinlock held
1916 */
Douglas Anderson6a659532015-11-19 13:23:14 -08001917void dwc2_hcd_connect(struct dwc2_hsotg *hsotg)
1918{
1919 if (hsotg->lx_state != DWC2_L0)
1920 usb_hcd_resume_root_hub(hsotg->priv);
1921
1922 hsotg->flags.b.port_connect_status_change = 1;
1923 hsotg->flags.b.port_connect_status = 1;
1924}
1925
1926/**
1927 * dwc2_hcd_disconnect() - Handles disconnect of the HCD
1928 *
1929 * @hsotg: Pointer to struct dwc2_hsotg
1930 * @force: If true, we won't try to reconnect even if we see device connected.
1931 *
1932 * Must be called with interrupt disabled and spinlock held
1933 */
1934void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force)
Paul Zimmerman7359d482013-03-11 17:47:59 -07001935{
1936 u32 intr;
Douglas Anderson6a659532015-11-19 13:23:14 -08001937 u32 hprt0;
Paul Zimmerman7359d482013-03-11 17:47:59 -07001938
1939 /* Set status flags for the hub driver */
1940 hsotg->flags.b.port_connect_status_change = 1;
1941 hsotg->flags.b.port_connect_status = 0;
1942
1943 /*
1944 * Shutdown any transfers in process by clearing the Tx FIFO Empty
1945 * interrupt mask and status bits and disabling subsequent host
1946 * channel interrupts.
1947 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001948 intr = dwc2_readl(hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001949 intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT);
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001950 dwc2_writel(intr, hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001951 intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001952 dwc2_writel(intr, hsotg->regs + GINTSTS);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001953
1954 /*
1955 * Turn off the vbus power only if the core has transitioned to device
1956 * mode. If still in host mode, need to keep power on to detect a
1957 * reconnection.
1958 */
1959 if (dwc2_is_device_mode(hsotg)) {
1960 if (hsotg->op_state != OTG_STATE_A_SUSPEND) {
1961 dev_dbg(hsotg->dev, "Disconnect: PortPower off\n");
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001962 dwc2_writel(0, hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07001963 }
1964
1965 dwc2_disable_host_interrupts(hsotg);
1966 }
1967
1968 /* Respond with an error status to all URBs in the schedule */
1969 dwc2_kill_all_urbs(hsotg);
1970
1971 if (dwc2_is_host_mode(hsotg))
1972 /* Clean up any host channels that were in use */
1973 dwc2_hcd_cleanup_channels(hsotg);
1974
1975 dwc2_host_disconnect(hsotg);
Douglas Anderson6a659532015-11-19 13:23:14 -08001976
1977 /*
1978 * Add an extra check here to see if we're actually connected but
1979 * we don't have a detection interrupt pending. This can happen if:
1980 * 1. hardware sees connect
1981 * 2. hardware sees disconnect
1982 * 3. hardware sees connect
1983 * 4. dwc2_port_intr() - clears connect interrupt
1984 * 5. dwc2_handle_common_intr() - calls here
1985 *
1986 * Without the extra check here we will end calling disconnect
1987 * and won't get any future interrupts to handle the connect.
1988 */
1989 if (!force) {
1990 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
1991 if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS))
1992 dwc2_hcd_connect(hsotg);
1993 }
Paul Zimmerman7359d482013-03-11 17:47:59 -07001994}
1995
1996/**
1997 * dwc2_hcd_rem_wakeup() - Handles Remote Wakeup
1998 *
1999 * @hsotg: Pointer to struct dwc2_hsotg
2000 */
2001static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
2002{
Douglas Anderson1fb7f122015-10-22 13:05:03 -07002003 if (hsotg->bus_suspended) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07002004 hsotg->flags.b.port_suspend_change = 1;
Gregory Herrerob46146d52015-01-30 09:09:26 +01002005 usb_hcd_resume_root_hub(hsotg->priv);
Gregory Herrerob46146d52015-01-30 09:09:26 +01002006 }
Douglas Anderson1fb7f122015-10-22 13:05:03 -07002007
2008 if (hsotg->lx_state == DWC2_L1)
2009 hsotg->flags.b.port_l1_change = 1;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002010}
2011
2012/**
2013 * dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner
2014 *
2015 * @hsotg: Pointer to struct dwc2_hsotg
2016 *
2017 * Must be called with interrupt disabled and spinlock held
2018 */
2019void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
2020{
2021 dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n");
2022
2023 /*
2024 * The root hub should be disconnected before this function is called.
2025 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
2026 * and the QH lists (via ..._hcd_endpoint_disable).
2027 */
2028
2029 /* Turn off all host-specific interrupts */
2030 dwc2_disable_host_interrupts(hsotg);
2031
2032 /* Turn off the vbus power */
2033 dev_dbg(hsotg->dev, "PortPower off\n");
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002034 dwc2_writel(0, hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07002035}
2036
Gregory Herrero33ad2612015-04-29 22:09:15 +02002037/* Caller must hold driver lock */
Paul Zimmerman7359d482013-03-11 17:47:59 -07002038static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +02002039 struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
Mian Yousaf Kaukabb5a468a2015-06-29 11:05:29 +02002040 struct dwc2_qtd *qtd)
Paul Zimmerman7359d482013-03-11 17:47:59 -07002041{
Paul Zimmerman7359d482013-03-11 17:47:59 -07002042 u32 intr_mask;
2043 int retval;
Nick Hudson9f8144c2013-12-06 14:01:44 -08002044 int dev_speed;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002045
2046 if (!hsotg->flags.b.port_connect_status) {
2047 /* No longer connected */
2048 dev_err(hsotg->dev, "Not connected\n");
2049 return -ENODEV;
2050 }
2051
Nick Hudson9f8144c2013-12-06 14:01:44 -08002052 dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
2053
2054 /* Some configurations cannot support LS traffic on a FS root port */
2055 if ((dev_speed == USB_SPEED_LOW) &&
2056 (hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) &&
2057 (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI)) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002058 u32 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
Nick Hudson9f8144c2013-12-06 14:01:44 -08002059 u32 prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
2060
2061 if (prtspd == HPRT0_SPD_FULL_SPEED)
2062 return -ENODEV;
2063 }
2064
Paul Zimmerman7359d482013-03-11 17:47:59 -07002065 if (!qtd)
Mian Yousaf Kaukabb5a468a2015-06-29 11:05:29 +02002066 return -EINVAL;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002067
2068 dwc2_hcd_qtd_init(qtd, urb);
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +02002069 retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
Paul Zimmerman9bda1aa2013-11-22 16:43:45 -08002070 if (retval) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07002071 dev_err(hsotg->dev,
2072 "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
2073 retval);
Paul Zimmerman7359d482013-03-11 17:47:59 -07002074 return retval;
2075 }
2076
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002077 intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
Paul Zimmerman9bda1aa2013-11-22 16:43:45 -08002078 if (!(intr_mask & GINTSTS_SOF)) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07002079 enum dwc2_transaction_type tr_type;
2080
2081 if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
2082 !(qtd->urb->flags & URB_GIVEBACK_ASAP))
2083 /*
2084 * Do not schedule SG transactions until qtd has
2085 * URB_GIVEBACK_ASAP set
2086 */
2087 return 0;
2088
Paul Zimmerman7359d482013-03-11 17:47:59 -07002089 tr_type = dwc2_hcd_select_transactions(hsotg);
2090 if (tr_type != DWC2_TRANSACTION_NONE)
2091 dwc2_hcd_queue_transactions(hsotg, tr_type);
Paul Zimmerman7359d482013-03-11 17:47:59 -07002092 }
2093
Paul Zimmerman9bda1aa2013-11-22 16:43:45 -08002094 return 0;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002095}
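
/*
 * A rough sketch of the expected calling sequence for the enqueue above
 * (variable names are illustrative; see the comment noting that the
 * driver lock must be held):
 *
 *	qtd = kzalloc(sizeof(*qtd), mem_flags);
 *	spin_lock_irqsave(&hsotg->lock, flags);
 *	retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
 *	spin_unlock_irqrestore(&hsotg->lock, flags);
 *
 * i.e. the QTD is allocated outside the lock and the call itself is made
 * with the spinlock held and interrupts disabled.
 */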
2096
2097/* Must be called with interrupt disabled and spinlock held */
2098static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
2099 struct dwc2_hcd_urb *urb)
2100{
2101 struct dwc2_qh *qh;
2102 struct dwc2_qtd *urb_qtd;
2103
2104 urb_qtd = urb->qtd;
2105 if (!urb_qtd) {
2106 dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n");
2107 return -EINVAL;
2108 }
2109
2110 qh = urb_qtd->qh;
2111 if (!qh) {
2112 dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n");
2113 return -EINVAL;
2114 }
2115
Paul Zimmerman0d012b92013-07-13 14:53:48 -07002116 urb->priv = NULL;
2117
Paul Zimmerman7359d482013-03-11 17:47:59 -07002118 if (urb_qtd->in_process && qh->channel) {
2119 dwc2_dump_channel_info(hsotg, qh->channel);
2120
2121 /* The QTD is in process (it has been assigned to a channel) */
2122 if (hsotg->flags.b.port_connect_status)
2123 /*
2124 * If still connected (i.e. in host mode), halt the
2125 * channel so it can be used for other transfers. If
2126 * no longer connected, the host registers can't be
2127 * written to halt the channel since the core is in
2128 * device mode.
2129 */
2130 dwc2_hc_halt(hsotg, qh->channel,
2131 DWC2_HC_XFER_URB_DEQUEUE);
2132 }
2133
2134 /*
2135 * Free the QTD and clean up the associated QH. Leave the QH in the
2136 * schedule if it has any remaining QTDs.
2137 */
John Youn95832c02017-01-23 14:57:26 -08002138 if (!hsotg->params.dma_desc_enable) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07002139 u8 in_process = urb_qtd->in_process;
2140
2141 dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
2142 if (in_process) {
2143 dwc2_hcd_qh_deactivate(hsotg, qh, 0);
2144 qh->channel = NULL;
2145 } else if (list_empty(&qh->qtd_list)) {
2146 dwc2_hcd_qh_unlink(hsotg, qh);
2147 }
2148 } else {
2149 dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
2150 }
2151
2152 return 0;
2153}
2154
2155/* Must NOT be called with interrupt disabled or spinlock held */
2156static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg,
2157 struct usb_host_endpoint *ep, int retry)
2158{
2159 struct dwc2_qtd *qtd, *qtd_tmp;
2160 struct dwc2_qh *qh;
2161 unsigned long flags;
2162 int rc;
2163
2164 spin_lock_irqsave(&hsotg->lock, flags);
2165
2166 qh = ep->hcpriv;
2167 if (!qh) {
2168 rc = -EINVAL;
2169 goto err;
2170 }
2171
2172 while (!list_empty(&qh->qtd_list) && retry--) {
2173 if (retry == 0) {
2174 dev_err(hsotg->dev,
2175 "## timeout in dwc2_hcd_endpoint_disable() ##\n");
2176 rc = -EBUSY;
2177 goto err;
2178 }
2179
2180 spin_unlock_irqrestore(&hsotg->lock, flags);
Nicholas Mc Guire04a9db72017-01-12 16:54:03 +01002181 msleep(20);
Paul Zimmerman7359d482013-03-11 17:47:59 -07002182 spin_lock_irqsave(&hsotg->lock, flags);
2183 qh = ep->hcpriv;
2184 if (!qh) {
2185 rc = -EINVAL;
2186 goto err;
2187 }
2188 }
2189
2190 dwc2_hcd_qh_unlink(hsotg, qh);
2191
2192 /* Free each QTD in the QH's QTD list */
2193 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry)
2194 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
2195
2196 ep->hcpriv = NULL;
Douglas Anderson16e80212016-01-28 18:19:55 -08002197
2198 if (qh->channel && qh->channel->qh == qh)
2199 qh->channel->qh = NULL;
2200
Paul Zimmerman7359d482013-03-11 17:47:59 -07002201 spin_unlock_irqrestore(&hsotg->lock, flags);
Douglas Anderson16e80212016-01-28 18:19:55 -08002202
Paul Zimmerman7359d482013-03-11 17:47:59 -07002203 dwc2_hcd_qh_free(hsotg, qh);
2204
2205 return 0;
2206
2207err:
2208 ep->hcpriv = NULL;
2209 spin_unlock_irqrestore(&hsotg->lock, flags);
2210
2211 return rc;
2212}
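
/*
 * Note on the retry argument above: each retry burns one 20 ms sleep, so
 * a caller passing, say, retry = 250 is allowing roughly five seconds for
 * the endpoint's QTD list to drain before the -EBUSY timeout fires.
 */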
2213
2214/* Must be called with interrupt disabled and spinlock held */
2215static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg,
2216 struct usb_host_endpoint *ep)
2217{
2218 struct dwc2_qh *qh = ep->hcpriv;
2219
2220 if (!qh)
2221 return -EINVAL;
2222
2223 qh->data_toggle = DWC2_HC_PID_DATA0;
2224
2225 return 0;
2226}
2227
John Younb02038fa2016-02-23 19:55:00 -08002228/**
2229 * dwc2_core_init() - Initializes the DWC_otg controller registers and
2230 * prepares the core for device mode or host mode operation
2231 *
2232 * @hsotg: Programming view of the DWC_otg controller
2233 * @initial_setup: If true then this is the first init for this instance.
2234 */
2235static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
2236{
2237 u32 usbcfg, otgctl;
2238 int retval;
2239
2240 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
2241
2242 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
2243
2244 /* Set ULPI External VBUS bit if needed */
2245 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
John Youn95832c02017-01-23 14:57:26 -08002246 if (hsotg->params.phy_ulpi_ext_vbus)
John Younb02038fa2016-02-23 19:55:00 -08002247 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
2248
2249 /* Set external TS Dline pulsing bit if needed */
2250 usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
John Youn95832c02017-01-23 14:57:26 -08002251 if (hsotg->params.ts_dline)
John Younb02038fa2016-02-23 19:55:00 -08002252 usbcfg |= GUSBCFG_TERMSELDLPULSE;
2253
2254 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
2255
2256 /*
2257 * Reset the Controller
2258 *
2259 * We only need to reset the controller if this is a re-init.
2260 * For the first init we know for sure that earlier code reset us (it
2261 * needed to in order to properly detect various parameters).
2262 */
2263 if (!initial_setup) {
2264 retval = dwc2_core_reset_and_force_dr_mode(hsotg);
2265 if (retval) {
2266 dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
2267 __func__);
2268 return retval;
2269 }
2270 }
2271
2272 /*
2273 * This needs to happen in FS mode before any other programming occurs
2274 */
2275 retval = dwc2_phy_init(hsotg, initial_setup);
2276 if (retval)
2277 return retval;
2278
2279 /* Program the GAHBCFG Register */
2280 retval = dwc2_gahbcfg_init(hsotg);
2281 if (retval)
2282 return retval;
2283
2284 /* Program the GUSBCFG register */
2285 dwc2_gusbcfg_init(hsotg);
2286
2287 /* Program the GOTGCTL register */
2288 otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
2289 otgctl &= ~GOTGCTL_OTGVER;
John Younb02038fa2016-02-23 19:55:00 -08002290 dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
John Younb02038fa2016-02-23 19:55:00 -08002291
2292 /* Clear the SRP success bit for FS-I2c */
2293 hsotg->srp_success = 0;
2294
2295 /* Enable common interrupts */
2296 dwc2_enable_common_interrupts(hsotg);
2297
2298 /*
2299 * Do device or host initialization based on mode during PCD and
2300 * HCD initialization
2301 */
2302 if (dwc2_is_host_mode(hsotg)) {
2303 dev_dbg(hsotg->dev, "Host Mode\n");
2304 hsotg->op_state = OTG_STATE_A_HOST;
2305 } else {
2306 dev_dbg(hsotg->dev, "Device Mode\n");
2307 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
2308 }
2309
2310 return 0;
2311}
2312
2313/**
2314 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
2315 * Host mode
2316 *
2317 * @hsotg: Programming view of DWC_otg controller
2318 *
2319 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
2320 * request queues. Host channels are reset to ensure that they are ready for
2321 * performing transfers.
2322 */
2323static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
2324{
2325 u32 hcfg, hfir, otgctl;
2326
2327 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
2328
2329 /* Restart the Phy Clock */
2330 dwc2_writel(0, hsotg->regs + PCGCTL);
2331
2332 /* Initialize Host Configuration Register */
2333 dwc2_init_fs_ls_pclk_sel(hsotg);
Vardan Mikayelyan38e90022016-11-14 19:17:03 -08002334 if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
2335 hsotg->params.speed == DWC2_SPEED_PARAM_LOW) {
John Younb02038fa2016-02-23 19:55:00 -08002336 hcfg = dwc2_readl(hsotg->regs + HCFG);
2337 hcfg |= HCFG_FSLSSUPP;
2338 dwc2_writel(hcfg, hsotg->regs + HCFG);
2339 }
2340
2341 /*
2342 * This bit allows the HFIR frame interval to be reloaded dynamically at
2343 * runtime. The reload control bit itself must be programmed during initial
2344 * configuration and its value must not be changed afterward.
2345 */
John Youn95832c02017-01-23 14:57:26 -08002346 if (hsotg->params.reload_ctl) {
John Younb02038fa2016-02-23 19:55:00 -08002347 hfir = dwc2_readl(hsotg->regs + HFIR);
2348 hfir |= HFIR_RLDCTRL;
2349 dwc2_writel(hfir, hsotg->regs + HFIR);
2350 }
2351
John Youn95832c02017-01-23 14:57:26 -08002352 if (hsotg->params.dma_desc_enable) {
John Younb02038fa2016-02-23 19:55:00 -08002353 u32 op_mode = hsotg->hw_params.op_mode;
2354
2355 if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
2356 !hsotg->hw_params.dma_desc_enable ||
2357 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
2358 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
2359 op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
2360 dev_err(hsotg->dev,
2361 "Hardware does not support descriptor DMA mode -\n");
2362 dev_err(hsotg->dev,
2363 "falling back to buffer DMA mode.\n");
John Youn95832c02017-01-23 14:57:26 -08002364 hsotg->params.dma_desc_enable = false;
John Younb02038fa2016-02-23 19:55:00 -08002365 } else {
2366 hcfg = dwc2_readl(hsotg->regs + HCFG);
2367 hcfg |= HCFG_DESCDMA;
2368 dwc2_writel(hcfg, hsotg->regs + HCFG);
2369 }
2370 }
2371
2372 /* Configure data FIFO sizes */
2373 dwc2_config_fifos(hsotg);
2374
2375 /* TODO - check this */
2376 /* Clear Host Set HNP Enable in the OTG Control Register */
2377 otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
2378 otgctl &= ~GOTGCTL_HSTSETHNPEN;
2379 dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
2380
2381 /* Make sure the FIFOs are flushed */
2382 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
2383 dwc2_flush_rx_fifo(hsotg);
2384
2385 /* Clear Host Set HNP Enable in the OTG Control Register */
2386 otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
2387 otgctl &= ~GOTGCTL_HSTSETHNPEN;
2388 dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
2389
John Youn95832c02017-01-23 14:57:26 -08002390 if (!hsotg->params.dma_desc_enable) {
John Younb02038fa2016-02-23 19:55:00 -08002391 int num_channels, i;
2392 u32 hcchar;
2393
2394 /* Flush out any leftover queued requests */
John Younbea8e862016-11-03 17:55:53 -07002395 num_channels = hsotg->params.host_channels;
John Younb02038fa2016-02-23 19:55:00 -08002396 for (i = 0; i < num_channels; i++) {
2397 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
2398 hcchar &= ~HCCHAR_CHENA;
2399 hcchar |= HCCHAR_CHDIS;
2400 hcchar &= ~HCCHAR_EPDIR;
2401 dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
2402 }
2403
2404 /* Halt all channels to put them into a known state */
2405 for (i = 0; i < num_channels; i++) {
John Younb02038fa2016-02-23 19:55:00 -08002406 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
2407 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
2408 hcchar &= ~HCCHAR_EPDIR;
2409 dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
2410 dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
2411 __func__, i);
Sevak Arakelyan79d6b8c2018-01-19 14:39:31 +04002412
2413 if (dwc2_hsotg_wait_bit_clear(hsotg, HCCHAR(i),
2414 HCCHAR_CHENA, 1000)) {
2415 dev_warn(hsotg->dev, "Unable to clear enable on channel %d\n",
2416 i);
2417 }
John Younb02038fa2016-02-23 19:55:00 -08002418 }
2419 }
2420
2421 /* Turn on the vbus power */
2422 dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
2423 if (hsotg->op_state == OTG_STATE_A_HOST) {
2424 u32 hprt0 = dwc2_read_hprt0(hsotg);
2425
2426 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
2427 !!(hprt0 & HPRT0_PWR));
2428 if (!(hprt0 & HPRT0_PWR)) {
2429 hprt0 |= HPRT0_PWR;
2430 dwc2_writel(hprt0, hsotg->regs + HPRT0);
2431 }
2432 }
2433
2434 dwc2_enable_host_interrupts(hsotg);
2435}
2436
Paul Zimmerman7359d482013-03-11 17:47:59 -07002437/*
2438 * Initializes dynamic portions of the DWC_otg HCD state
2439 *
2440 * Must be called with interrupt disabled and spinlock held
2441 */
2442static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
2443{
2444 struct dwc2_host_chan *chan, *chan_tmp;
2445 int num_channels;
2446 int i;
2447
2448 hsotg->flags.d32 = 0;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002449 hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
Dom Cobley20f2eb92013-09-23 14:23:34 -07002450
John Youn95832c02017-01-23 14:57:26 -08002451 if (hsotg->params.uframe_sched) {
Dom Cobley20f2eb92013-09-23 14:23:34 -07002452 hsotg->available_host_channels =
John Younbea8e862016-11-03 17:55:53 -07002453 hsotg->params.host_channels;
Dom Cobley20f2eb92013-09-23 14:23:34 -07002454 } else {
2455 hsotg->non_periodic_channels = 0;
2456 hsotg->periodic_channels = 0;
2457 }
Paul Zimmerman7359d482013-03-11 17:47:59 -07002458
2459 /*
2460 * Put all channels in the free channel list and clean up channel
2461 * states
2462 */
2463 list_for_each_entry_safe(chan, chan_tmp, &hsotg->free_hc_list,
2464 hc_list_entry)
2465 list_del_init(&chan->hc_list_entry);
2466
John Younbea8e862016-11-03 17:55:53 -07002467 num_channels = hsotg->params.host_channels;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002468 for (i = 0; i < num_channels; i++) {
2469 chan = hsotg->hc_ptr_array[i];
2470 list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
2471 dwc2_hc_cleanup(hsotg, chan);
2472 }
2473
2474 /* Initialize the DWC core for host mode operation */
2475 dwc2_core_host_init(hsotg);
2476}
2477
2478static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg,
2479 struct dwc2_host_chan *chan,
2480 struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
2481{
2482 int hub_addr, hub_port;
2483
2484 chan->do_split = 1;
2485 chan->xact_pos = qtd->isoc_split_pos;
2486 chan->complete_split = qtd->complete_split;
2487 dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
2488 chan->hub_addr = (u8)hub_addr;
2489 chan->hub_port = (u8)hub_port;
2490}
2491
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002492static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
2493 struct dwc2_host_chan *chan,
2494 struct dwc2_qtd *qtd)
Paul Zimmerman7359d482013-03-11 17:47:59 -07002495{
2496 struct dwc2_hcd_urb *urb = qtd->urb;
2497 struct dwc2_hcd_iso_packet_desc *frame_desc;
2498
2499 switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
2500 case USB_ENDPOINT_XFER_CONTROL:
2501 chan->ep_type = USB_ENDPOINT_XFER_CONTROL;
2502
2503 switch (qtd->control_phase) {
2504 case DWC2_CONTROL_SETUP:
2505 dev_vdbg(hsotg->dev, " Control setup transaction\n");
2506 chan->do_ping = 0;
2507 chan->ep_is_in = 0;
2508 chan->data_pid_start = DWC2_HC_PID_SETUP;
John Youn95832c02017-01-23 14:57:26 -08002509 if (hsotg->params.host_dma)
Paul Zimmerman7359d482013-03-11 17:47:59 -07002510 chan->xfer_dma = urb->setup_dma;
2511 else
2512 chan->xfer_buf = urb->setup_packet;
2513 chan->xfer_len = 8;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002514 break;
2515
2516 case DWC2_CONTROL_DATA:
2517 dev_vdbg(hsotg->dev, " Control data transaction\n");
2518 chan->data_pid_start = qtd->data_toggle;
2519 break;
2520
2521 case DWC2_CONTROL_STATUS:
2522 /*
2523 * Direction is opposite of data direction or IN if no
2524 * data
2525 */
2526 dev_vdbg(hsotg->dev, " Control status transaction\n");
2527 if (urb->length == 0)
2528 chan->ep_is_in = 1;
2529 else
2530 chan->ep_is_in =
2531 dwc2_hcd_is_pipe_out(&urb->pipe_info);
2532 if (chan->ep_is_in)
2533 chan->do_ping = 0;
2534 chan->data_pid_start = DWC2_HC_PID_DATA1;
2535 chan->xfer_len = 0;
John Youn95832c02017-01-23 14:57:26 -08002536 if (hsotg->params.host_dma)
Paul Zimmerman7359d482013-03-11 17:47:59 -07002537 chan->xfer_dma = hsotg->status_buf_dma;
2538 else
2539 chan->xfer_buf = hsotg->status_buf;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002540 break;
2541 }
2542 break;
2543
2544 case USB_ENDPOINT_XFER_BULK:
2545 chan->ep_type = USB_ENDPOINT_XFER_BULK;
2546 break;
2547
2548 case USB_ENDPOINT_XFER_INT:
2549 chan->ep_type = USB_ENDPOINT_XFER_INT;
2550 break;
2551
2552 case USB_ENDPOINT_XFER_ISOC:
2553 chan->ep_type = USB_ENDPOINT_XFER_ISOC;
John Youn95832c02017-01-23 14:57:26 -08002554 if (hsotg->params.dma_desc_enable)
Paul Zimmerman7359d482013-03-11 17:47:59 -07002555 break;
2556
2557 frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
2558 frame_desc->status = 0;
2559
John Youn95832c02017-01-23 14:57:26 -08002560 if (hsotg->params.host_dma) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07002561 chan->xfer_dma = urb->dma;
2562 chan->xfer_dma += frame_desc->offset +
2563 qtd->isoc_split_offset;
2564 } else {
2565 chan->xfer_buf = urb->buf;
2566 chan->xfer_buf += frame_desc->offset +
2567 qtd->isoc_split_offset;
2568 }
2569
2570 chan->xfer_len = frame_desc->length - qtd->isoc_split_offset;
2571
Paul Zimmerman7359d482013-03-11 17:47:59 -07002572 if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) {
2573 if (chan->xfer_len <= 188)
2574 chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL;
2575 else
2576 chan->xact_pos = DWC2_HCSPLT_XACTPOS_BEGIN;
2577 }
2578 break;
2579 }
Paul Zimmerman7359d482013-03-11 17:47:59 -07002580}
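
/*
 * On the 188-byte threshold above: one microframe of full-speed bus time
 * carries at most 1500 / 8 = 187.5 bytes, so an isochronous OUT split
 * whose remaining payload fits in 188 bytes can be sent as a single
 * "all" start-split, while anything larger must begin with a "begin"
 * transaction and continue in later microframes.
 */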
2581
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002582#define DWC2_USB_DMA_ALIGN 4
2583
2584struct dma_aligned_buffer {
2585 void *kmalloc_ptr;
2586 void *old_xfer_buffer;
2587 u8 data[0];
2588};
2589
2590static void dwc2_free_dma_aligned_buffer(struct urb *urb)
Paul Zimmerman7359d482013-03-11 17:47:59 -07002591{
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002592 struct dma_aligned_buffer *temp;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002593
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002594 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2595 return;
Paul Zimmerman5dce9552014-09-16 13:47:27 -07002596
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002597 temp = container_of(urb->transfer_buffer,
John Youn9da51972017-01-17 20:30:27 -08002598 struct dma_aligned_buffer, data);
Paul Zimmerman7359d482013-03-11 17:47:59 -07002599
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002600 if (usb_urb_dir_in(urb))
2601 memcpy(temp->old_xfer_buffer, temp->data,
2602 urb->transfer_buffer_length);
2603 urb->transfer_buffer = temp->old_xfer_buffer;
2604 kfree(temp->kmalloc_ptr);
Paul Zimmerman5dce9552014-09-16 13:47:27 -07002605
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002606 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2607}
Paul Zimmerman7359d482013-03-11 17:47:59 -07002608
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002609static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
2610{
2611 struct dma_aligned_buffer *temp, *kmalloc_ptr;
2612 size_t kmalloc_size;
Gregory Herrerodb62b9a2015-04-29 22:09:16 +02002613
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002614 if (urb->num_sgs || urb->sg ||
2615 urb->transfer_buffer_length == 0 ||
2616 !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
2617 return 0;
2618
2619 /* Allocate a buffer with enough padding for alignment */
2620 kmalloc_size = urb->transfer_buffer_length +
2621 sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
2622
2623 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2624 if (!kmalloc_ptr)
2625 return -ENOMEM;
2626
2627 /* Position our struct dma_aligned_buffer such that data is aligned */
2628 temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
2629 temp->kmalloc_ptr = kmalloc_ptr;
2630 temp->old_xfer_buffer = urb->transfer_buffer;
2631 if (usb_urb_dir_out(urb))
2632 memcpy(temp->data, urb->transfer_buffer,
2633 urb->transfer_buffer_length);
2634 urb->transfer_buffer = temp->data;
2635
2636 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2637
Paul Zimmerman7359d482013-03-11 17:47:59 -07002638 return 0;
2639}
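
/*
 * A worked example of the alignment trick above, with made-up addresses:
 * if kmalloc() returned 0x1002 (in practice kmalloc memory is already
 * aligned more strictly, so the slide is usually zero), then with a
 * 16-byte struct dma_aligned_buffer
 *
 *	kmalloc_ptr + 1      = 0x1012
 *	PTR_ALIGN(0x1012, 4) = 0x1014
 *	temp                 = 0x1014 - 16 = 0x1004
 *	temp->data           = 0x1014	(DWC2_USB_DMA_ALIGN aligned)
 *
 * The extra DWC2_USB_DMA_ALIGN - 1 bytes in kmalloc_size guarantee the
 * slide always fits, and kmalloc_ptr/old_xfer_buffer are stashed so the
 * free path can kfree() and copy IN data back to the caller's buffer.
 */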
2640
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002641static int dwc2_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
John Youn9da51972017-01-17 20:30:27 -08002642 gfp_t mem_flags)
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002643{
2644 int ret;
2645
2646 /* We assume setup_dma is always aligned; warn if not */
2647 WARN_ON_ONCE(urb->setup_dma &&
2648 (urb->setup_dma & (DWC2_USB_DMA_ALIGN - 1)));
2649
2650 ret = dwc2_alloc_dma_aligned_buffer(urb, mem_flags);
2651 if (ret)
2652 return ret;
2653
2654 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2655 if (ret)
2656 dwc2_free_dma_aligned_buffer(urb);
2657
2658 return ret;
2659}
2660
2661static void dwc2_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2662{
2663 usb_hcd_unmap_urb_for_dma(hcd, urb);
2664 dwc2_free_dma_aligned_buffer(urb);
2665}
2666
Paul Zimmerman7359d482013-03-11 17:47:59 -07002667/**
2668 * dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host
2669 * channel and initializes the host channel to perform the transactions. The
2670 * host channel is removed from the free list.
2671 *
2672 * @hsotg: The HCD state structure
2673 * @qh: Transactions from the first QTD for this QH are selected and assigned
2674 * to a free host channel
2675 */
Dom Cobley20f2eb92013-09-23 14:23:34 -07002676static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
Paul Zimmerman7359d482013-03-11 17:47:59 -07002677{
2678 struct dwc2_host_chan *chan;
2679 struct dwc2_hcd_urb *urb;
2680 struct dwc2_qtd *qtd;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002681
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02002682 if (dbg_qh(qh))
2683 dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh);
Paul Zimmerman7359d482013-03-11 17:47:59 -07002684
2685 if (list_empty(&qh->qtd_list)) {
2686 dev_dbg(hsotg->dev, "No QTDs in QH list\n");
Dom Cobley20f2eb92013-09-23 14:23:34 -07002687 return -ENOMEM;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002688 }
2689
2690 if (list_empty(&hsotg->free_hc_list)) {
2691 dev_dbg(hsotg->dev, "No free channel to assign\n");
Dom Cobley20f2eb92013-09-23 14:23:34 -07002692 return -ENOMEM;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002693 }
2694
2695 chan = list_first_entry(&hsotg->free_hc_list, struct dwc2_host_chan,
2696 hc_list_entry);
2697
Dom Cobley20f2eb92013-09-23 14:23:34 -07002698 /* Remove host channel from free list */
Paul Zimmerman7359d482013-03-11 17:47:59 -07002699 list_del_init(&chan->hc_list_entry);
2700
2701 qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
2702 urb = qtd->urb;
2703 qh->channel = chan;
2704 qtd->in_process = 1;
2705
2706 /*
2707 * Use usb_pipedevice to determine device address. This address is
2708 * 0 before the SET_ADDRESS command and the correct address afterward.
2709 */
2710 chan->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
2711 chan->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
2712 chan->speed = qh->dev_speed;
2713 chan->max_packet = dwc2_max_packet(qh->maxp);
2714
2715 chan->xfer_started = 0;
2716 chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
2717 chan->error_state = (qtd->error_count > 0);
2718 chan->halt_on_queue = 0;
2719 chan->halt_pending = 0;
2720 chan->requests = 0;
2721
2722 /*
2723 * The following values may be modified in the transfer type section
2724 * below. The xfer_len value may be reduced when the transfer is
2725 * started to accommodate the max widths of the XferSize and PktCnt
2726 * fields in the HCTSIZn register.
2727 */
2728
2729 chan->ep_is_in = (dwc2_hcd_is_pipe_in(&urb->pipe_info) != 0);
2730 if (chan->ep_is_in)
2731 chan->do_ping = 0;
2732 else
2733 chan->do_ping = qh->ping_state;
2734
2735 chan->data_pid_start = qh->data_toggle;
2736 chan->multi_count = 1;
2737
Rashika Kheriabb6c3422013-10-26 23:11:22 +05302738 if (urb->actual_length > urb->length &&
John Youn9da51972017-01-17 20:30:27 -08002739 !dwc2_hcd_is_pipe_in(&urb->pipe_info))
Paul Zimmerman84181082013-09-23 14:23:33 -07002740 urb->actual_length = urb->length;
2741
John Youn95832c02017-01-23 14:57:26 -08002742 if (hsotg->params.host_dma)
Paul Zimmerman7359d482013-03-11 17:47:59 -07002743 chan->xfer_dma = urb->dma + urb->actual_length;
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002744 else
Paul Zimmerman7359d482013-03-11 17:47:59 -07002745 chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002746
2747 chan->xfer_len = urb->length - urb->actual_length;
2748 chan->xfer_count = 0;
2749
2750 /* Set the split attributes if required */
2751 if (qh->do_split)
2752 dwc2_hc_init_split(hsotg, chan, qtd, urb);
2753 else
2754 chan->do_split = 0;
2755
2756 /* Set the transfer attributes */
Douglas Anderson3bc04e22016-01-28 18:19:53 -08002757 dwc2_hc_init_xfer(hsotg, chan, qtd);
Paul Zimmerman7359d482013-03-11 17:47:59 -07002758
2759 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
2760 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
2761 /*
2762 * This value may be modified when the transfer is started
2763 * to reflect the actual transfer length
2764 */
2765 chan->multi_count = dwc2_hb_mult(qh->maxp);
2766
John Youn95832c02017-01-23 14:57:26 -08002767 if (hsotg->params.dma_desc_enable) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07002768 chan->desc_list_addr = qh->desc_list_dma;
Gregory Herrero95105a92015-11-20 11:49:29 +01002769 chan->desc_list_sz = qh->desc_list_sz;
2770 }
Paul Zimmerman7359d482013-03-11 17:47:59 -07002771
2772 dwc2_hc_init(hsotg, chan);
2773 chan->qh = qh;
Dom Cobley20f2eb92013-09-23 14:23:34 -07002774
2775 return 0;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002776}
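
/*
 * dwc2_max_packet() and dwc2_hb_mult() used above presumably follow the
 * usual USB wMaxPacketSize encoding: bits 10:0 carry the packet size and
 * bits 12:11 the number of additional transactions per microframe. For
 * example, maxp = 0x1400 describes a high-bandwidth periodic endpoint
 * with 1024-byte packets and 3 transactions per microframe, which is why
 * multi_count is re-derived from qh->maxp for INT/ISOC endpoints above.
 */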
2777
2778/**
2779 * dwc2_hcd_select_transactions() - Selects transactions from the HCD transfer
2780 * schedule and assigns them to available host channels. Called from the HCD
2781 * interrupt handler functions.
2782 *
2783 * @hsotg: The HCD state structure
2784 *
2785 * Return: The types of new transactions that were assigned to host channels
2786 */
2787enum dwc2_transaction_type dwc2_hcd_select_transactions(
2788 struct dwc2_hsotg *hsotg)
2789{
2790 enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE;
2791 struct list_head *qh_ptr;
2792 struct dwc2_qh *qh;
2793 int num_channels;
2794
2795#ifdef DWC2_DEBUG_SOF
2796 dev_vdbg(hsotg->dev, " Select Transactions\n");
2797#endif
2798
2799 /* Process entries in the periodic ready list */
2800 qh_ptr = hsotg->periodic_sched_ready.next;
2801 while (qh_ptr != &hsotg->periodic_sched_ready) {
2802 if (list_empty(&hsotg->free_hc_list))
2803 break;
John Youn95832c02017-01-23 14:57:26 -08002804 if (hsotg->params.uframe_sched) {
Dom Cobley20f2eb92013-09-23 14:23:34 -07002805 if (hsotg->available_host_channels <= 1)
2806 break;
2807 hsotg->available_host_channels--;
2808 }
Paul Zimmerman7359d482013-03-11 17:47:59 -07002809 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
Dom Cobley20f2eb92013-09-23 14:23:34 -07002810 if (dwc2_assign_and_init_hc(hsotg, qh))
2811 break;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002812
2813 /*
2814 * Move the QH from the periodic ready schedule to the
2815 * periodic assigned schedule
2816 */
2817 qh_ptr = qh_ptr->next;
Douglas Anderson94ef7ae2016-01-28 18:19:56 -08002818 list_move_tail(&qh->qh_list_entry,
2819 &hsotg->periodic_sched_assigned);
Paul Zimmerman7359d482013-03-11 17:47:59 -07002820 ret_val = DWC2_TRANSACTION_PERIODIC;
2821 }
2822
2823 /*
2824 * Process entries in the inactive portion of the non-periodic
2825 * schedule. Some free host channels may not be used if they are
2826 * reserved for periodic transfers.
2827 */
John Younbea8e862016-11-03 17:55:53 -07002828 num_channels = hsotg->params.host_channels;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002829 qh_ptr = hsotg->non_periodic_sched_inactive.next;
2830 while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
John Youn95832c02017-01-23 14:57:26 -08002831 if (!hsotg->params.uframe_sched &&
Dom Cobley20f2eb92013-09-23 14:23:34 -07002832 hsotg->non_periodic_channels >= num_channels -
Paul Zimmerman7359d482013-03-11 17:47:59 -07002833 hsotg->periodic_channels)
2834 break;
2835 if (list_empty(&hsotg->free_hc_list))
2836 break;
2837 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
John Youn95832c02017-01-23 14:57:26 -08002838 if (hsotg->params.uframe_sched) {
Dom Cobley20f2eb92013-09-23 14:23:34 -07002839 if (hsotg->available_host_channels < 1)
2840 break;
2841 hsotg->available_host_channels--;
2842 }
2843
2844 if (dwc2_assign_and_init_hc(hsotg, qh))
2845 break;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002846
2847 /*
2848 * Move the QH from the non-periodic inactive schedule to the
2849 * non-periodic active schedule
2850 */
2851 qh_ptr = qh_ptr->next;
Douglas Anderson94ef7ae2016-01-28 18:19:56 -08002852 list_move_tail(&qh->qh_list_entry,
2853 &hsotg->non_periodic_sched_active);
Paul Zimmerman7359d482013-03-11 17:47:59 -07002854
2855 if (ret_val == DWC2_TRANSACTION_NONE)
2856 ret_val = DWC2_TRANSACTION_NON_PERIODIC;
2857 else
2858 ret_val = DWC2_TRANSACTION_ALL;
2859
John Youn95832c02017-01-23 14:57:26 -08002860 if (!hsotg->params.uframe_sched)
Dom Cobley20f2eb92013-09-23 14:23:34 -07002861 hsotg->non_periodic_channels++;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002862 }
2863
2864 return ret_val;
2865}
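
/*
 * Usage of the selector above, as seen in dwc2_hcd_urb_enqueue() earlier
 * in this file:
 *
 *	tr_type = dwc2_hcd_select_transactions(hsotg);
 *	if (tr_type != DWC2_TRANSACTION_NONE)
 *		dwc2_hcd_queue_transactions(hsotg, tr_type);
 *
 * The return value only tells the caller which queueing path (periodic,
 * non-periodic, or both) is worth running.
 */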
2866
2867/**
2868 * dwc2_queue_transaction() - Attempts to queue a single transaction request for
2869 * a host channel associated with either a periodic or non-periodic transfer
2870 *
2871 * @hsotg: The HCD state structure
2872 * @chan: Host channel descriptor associated with either a periodic or
2873 * non-periodic transfer
2874 * @fifo_dwords_avail: Number of DWORDs available in the periodic Tx FIFO
2875 * for periodic transfers or the non-periodic Tx FIFO
2876 * for non-periodic transfers
2877 *
2878 * Return: 1 if a request is queued and more requests may be needed to
2879 * complete the transfer, 0 if no more requests are required for this
2880 * transfer, -1 if there is insufficient space in the Tx FIFO
2881 *
2882 * This function assumes that there is space available in the appropriate
2883 * request queue. For an OUT transfer or SETUP transaction in Slave mode,
2884 * it checks whether space is available in the appropriate Tx FIFO.
2885 *
2886 * Must be called with interrupt disabled and spinlock held
2887 */
2888static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
2889 struct dwc2_host_chan *chan,
2890 u16 fifo_dwords_avail)
2891{
2892 int retval = 0;
2893
Douglas Andersonc9c8ac02016-01-28 18:19:57 -08002894 if (chan->do_split)
2895 /* Put ourselves on the list to keep order straight */
2896 list_move_tail(&chan->split_order_list_entry,
2897 &hsotg->split_order);
2898
John Youn95832c02017-01-23 14:57:26 -08002899 if (hsotg->params.host_dma) {
2900 if (hsotg->params.dma_desc_enable) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07002901 if (!chan->xfer_started ||
2902 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
2903 dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
2904 chan->qh->ping_state = 0;
2905 }
2906 } else if (!chan->xfer_started) {
2907 dwc2_hc_start_transfer(hsotg, chan);
2908 chan->qh->ping_state = 0;
2909 }
2910 } else if (chan->halt_pending) {
2911 /* Don't queue a request if the channel has been halted */
2912 } else if (chan->halt_on_queue) {
2913 dwc2_hc_halt(hsotg, chan, chan->halt_status);
2914 } else if (chan->do_ping) {
2915 if (!chan->xfer_started)
2916 dwc2_hc_start_transfer(hsotg, chan);
2917 } else if (!chan->ep_is_in ||
2918 chan->data_pid_start == DWC2_HC_PID_SETUP) {
2919 if ((fifo_dwords_avail * 4) >= chan->max_packet) {
2920 if (!chan->xfer_started) {
2921 dwc2_hc_start_transfer(hsotg, chan);
2922 retval = 1;
2923 } else {
2924 retval = dwc2_hc_continue_transfer(hsotg, chan);
2925 }
2926 } else {
2927 retval = -1;
2928 }
2929 } else {
2930 if (!chan->xfer_started) {
2931 dwc2_hc_start_transfer(hsotg, chan);
2932 retval = 1;
2933 } else {
2934 retval = dwc2_hc_continue_transfer(hsotg, chan);
2935 }
2936 }
2937
2938 return retval;
2939}
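
/*
 * How the tri-state return above is consumed by the two process functions
 * that follow (a sketch; see the loops below):
 *
 *	status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
 *	if (status < 0)
 *		no_fifo_space = true;	stop and re-arm the FIFO-empty IRQ
 *	else if (status > 0)
 *		more_to_do = 1;		more requests still to queue
 *
 * with 0 meaning this channel needs nothing further for now.
 */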
2940
2941/*
2942 * Processes periodic channels for the next frame and queues transactions for
2943 * these channels to the DWC_otg controller. After queueing transactions, the
2944 * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
2945 * to queue as Periodic Tx FIFO or request queue space becomes available.
2946 * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
2947 *
2948 * Must be called with interrupt disabled and spinlock held
2949 */
2950static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
2951{
2952 struct list_head *qh_ptr;
2953 struct dwc2_qh *qh;
2954 u32 tx_status;
2955 u32 fspcavail;
2956 u32 gintmsk;
2957 int status;
Douglas Anderson4e50e012016-01-28 18:20:03 -08002958 bool no_queue_space = false;
2959 bool no_fifo_space = false;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002960 u32 qspcavail;
2961
Douglas Anderson4e50e012016-01-28 18:20:03 -08002962 /* If empty list then just adjust interrupt enables */
2963 if (list_empty(&hsotg->periodic_sched_assigned))
2964 goto exit;
2965
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02002966 if (dbg_perio())
2967 dev_vdbg(hsotg->dev, "Queue periodic transactions\n");
Paul Zimmerman7359d482013-03-11 17:47:59 -07002968
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002969 tx_status = dwc2_readl(hsotg->regs + HPTXSTS);
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02002970 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
2971 TXSTS_QSPCAVAIL_SHIFT;
2972 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
2973 TXSTS_FSPCAVAIL_SHIFT;
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02002974
2975 if (dbg_perio()) {
2976 dev_vdbg(hsotg->dev, " P Tx Req Queue Space Avail (before queue): %d\n",
2977 qspcavail);
2978 dev_vdbg(hsotg->dev, " P Tx FIFO Space Avail (before queue): %d\n",
2979 fspcavail);
2980 }
Paul Zimmerman7359d482013-03-11 17:47:59 -07002981
2982 qh_ptr = hsotg->periodic_sched_assigned.next;
2983 while (qh_ptr != &hsotg->periodic_sched_assigned) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002984 tx_status = dwc2_readl(hsotg->regs + HPTXSTS);
Matthijs Kooijmanacdb9042013-08-30 18:45:16 +02002985 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
2986 TXSTS_QSPCAVAIL_SHIFT;
2987 if (qspcavail == 0) {
Nicholas Mc Guirefdb09b32017-01-12 16:55:02 +01002988 no_queue_space = true;
Paul Zimmerman7359d482013-03-11 17:47:59 -07002989 break;
2990 }
2991
2992 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
2993 if (!qh->channel) {
2994 qh_ptr = qh_ptr->next;
2995 continue;
2996 }
2997
2998 /* Make sure EP's TT buffer is clean before queueing qtds */
2999 if (qh->tt_buffer_dirty) {
3000 qh_ptr = qh_ptr->next;
3001 continue;
3002 }
3003
3004 /*
3005 * Set a flag if we're queuing high-bandwidth in slave mode.
3006 * The flag prevents any halts from getting into the request queue in
3007 * the middle of multiple high-bandwidth packets getting queued.
3008 */
John Youn95832c02017-01-23 14:57:26 -08003009 if (!hsotg->params.host_dma &&
John Youn9da51972017-01-17 20:30:27 -08003010 qh->channel->multi_count > 1)
Paul Zimmerman7359d482013-03-11 17:47:59 -07003011 hsotg->queuing_high_bandwidth = 1;
3012
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02003013 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
3014 TXSTS_FSPCAVAIL_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003015 status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
3016 if (status < 0) {
Nicholas Mc Guirefdb09b32017-01-12 16:55:02 +01003017 no_fifo_space = true;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003018 break;
3019 }
3020
3021 /*
3022 * In Slave mode, stay on the current transfer until there is
3023 * nothing more to do or the high-bandwidth request count is
3024 * reached. In DMA mode, only need to queue one request. The
3025 * controller automatically handles multiple packets for
3026 * high-bandwidth transfers.
3027 */
John Youn95832c02017-01-23 14:57:26 -08003028 if (hsotg->params.host_dma || status == 0 ||
Paul Zimmerman7359d482013-03-11 17:47:59 -07003029 qh->channel->requests == qh->channel->multi_count) {
3030 qh_ptr = qh_ptr->next;
3031 /*
3032 * Move the QH from the periodic assigned schedule to
3033 * the periodic queued schedule
3034 */
Douglas Anderson94ef7ae2016-01-28 18:19:56 -08003035 list_move_tail(&qh->qh_list_entry,
3036 &hsotg->periodic_sched_queued);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003037
3038 /* done queuing high bandwidth */
3039 hsotg->queuing_high_bandwidth = 0;
3040 }
3041 }
3042
Douglas Anderson4e50e012016-01-28 18:20:03 -08003043exit:
3044 if (no_queue_space || no_fifo_space ||
John Youn95832c02017-01-23 14:57:26 -08003045 (!hsotg->params.host_dma &&
Douglas Anderson4e50e012016-01-28 18:20:03 -08003046 !list_empty(&hsotg->periodic_sched_assigned))) {
3047 /*
3048 * May need to queue more transactions as the request
3049 * queue or Tx FIFO empties. Enable the periodic Tx
3050 * FIFO empty interrupt. (Always use the half-empty
3051 * level to ensure that new requests are loaded as
3052 * soon as possible.)
3053 */
3054 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
3055 if (!(gintmsk & GINTSTS_PTXFEMP)) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07003056 gintmsk |= GINTSTS_PTXFEMP;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003057 dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
Douglas Anderson4e50e012016-01-28 18:20:03 -08003058 }
3059 } else {
3060 /*
3061 * Disable the Tx FIFO empty interrupt since there are
3062 * no more transactions that need to be queued right
3063 * now. This function is called from interrupt
3064 * handlers to queue more transactions as transfer
3065 * states change.
John Youn38beaec2017-01-17 20:31:13 -08003066 */
Douglas Anderson4e50e012016-01-28 18:20:03 -08003067 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
3068 if (gintmsk & GINTSTS_PTXFEMP) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07003069 gintmsk &= ~GINTSTS_PTXFEMP;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003070 dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003071 }
3072 }
3073}
3074
3075/*
3076 * Processes active non-periodic channels and queues transactions for these
3077 * channels to the DWC_otg controller. After queueing transactions, the NP Tx
3078 * FIFO Empty interrupt is enabled if there are more transactions to queue as
3079 * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
3080 * FIFO Empty interrupt is disabled.
3081 *
3082 * Must be called with interrupts disabled and the spinlock held
3083 */
3084static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
3085{
3086 struct list_head *orig_qh_ptr;
3087 struct dwc2_qh *qh;
3088 u32 tx_status;
3089 u32 qspcavail;
3090 u32 fspcavail;
3091 u32 gintmsk;
3092 int status;
3093 int no_queue_space = 0;
3094 int no_fifo_space = 0;
3095 int more_to_do = 0;
3096
3097 dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n");
3098
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003099 tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02003100 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
3101 TXSTS_QSPCAVAIL_SHIFT;
3102 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
3103 TXSTS_FSPCAVAIL_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003104 dev_vdbg(hsotg->dev, " NP Tx Req Queue Space Avail (before queue): %d\n",
3105 qspcavail);
3106 dev_vdbg(hsotg->dev, " NP Tx FIFO Space Avail (before queue): %d\n",
3107 fspcavail);
3108
3109 /*
3110 * Keep track of the starting point. Skip over the start-of-list
3111 * entry.
3112 */
3113 if (hsotg->non_periodic_qh_ptr == &hsotg->non_periodic_sched_active)
3114 hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
3115 orig_qh_ptr = hsotg->non_periodic_qh_ptr;
3116
3117 /*
3118 * Process once through the active list or until no more space is
3119 * available in the request queue or the Tx FIFO
3120 */
3121 do {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003122 tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02003123 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
3124 TXSTS_QSPCAVAIL_SHIFT;
John Youn95832c02017-01-23 14:57:26 -08003125 if (!hsotg->params.host_dma && qspcavail == 0) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07003126 no_queue_space = 1;
3127 break;
3128 }
3129
3130 qh = list_entry(hsotg->non_periodic_qh_ptr, struct dwc2_qh,
3131 qh_list_entry);
3132 if (!qh->channel)
3133 goto next;
3134
3135 /* Make sure EP's TT buffer is clean before queueing qtds */
3136 if (qh->tt_buffer_dirty)
3137 goto next;
3138
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02003139 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
3140 TXSTS_FSPCAVAIL_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003141 status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
3142
3143 if (status > 0) {
3144 more_to_do = 1;
3145 } else if (status < 0) {
3146 no_fifo_space = 1;
3147 break;
3148 }
3149next:
3150 /* Advance to next QH, skipping start-of-list entry */
3151 hsotg->non_periodic_qh_ptr = hsotg->non_periodic_qh_ptr->next;
3152 if (hsotg->non_periodic_qh_ptr ==
3153 &hsotg->non_periodic_sched_active)
3154 hsotg->non_periodic_qh_ptr =
3155 hsotg->non_periodic_qh_ptr->next;
3156 } while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
3157
John Youn95832c02017-01-23 14:57:26 -08003158 if (!hsotg->params.host_dma) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003159 tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02003160 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
3161 TXSTS_QSPCAVAIL_SHIFT;
3162 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
3163 TXSTS_FSPCAVAIL_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003164 dev_vdbg(hsotg->dev,
3165 " NP Tx Req Queue Space Avail (after queue): %d\n",
3166 qspcavail);
3167 dev_vdbg(hsotg->dev,
3168 " NP Tx FIFO Space Avail (after queue): %d\n",
3169 fspcavail);
3170
3171 if (more_to_do || no_queue_space || no_fifo_space) {
3172 /*
3173 * May need to queue more transactions as the request
3174 * queue or Tx FIFO empties. Enable the non-periodic
3175 * Tx FIFO empty interrupt. (Always use the half-empty
3176 * level to ensure that new requests are loaded as
3177 * soon as possible.)
3178 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003179 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003180 gintmsk |= GINTSTS_NPTXFEMP;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003181 dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003182 } else {
3183 /*
3184 * Disable the Tx FIFO empty interrupt since there are
3185 * no more transactions that need to be queued right
3186 * now. This function is called from interrupt
3187 * handlers to queue more transactions as transfer
3188 * states change.
3189 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003190 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003191 gintmsk &= ~GINTSTS_NPTXFEMP;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003192 dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003193 }
3194 }
3195}
3196
3197/**
3198 * dwc2_hcd_queue_transactions() - Processes the currently active host channels
3199 * and queues transactions for these channels to the DWC_otg controller. Called
3200 * from the HCD interrupt handler functions.
3201 *
3202 * @hsotg: The HCD state structure
3203 * @tr_type: The type(s) of transactions to queue (non-periodic, periodic,
3204 * or both)
3205 *
3206 * Must be called with interrupts disabled and the spinlock held
3207 */
3208void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
3209 enum dwc2_transaction_type tr_type)
3210{
3211#ifdef DWC2_DEBUG_SOF
3212 dev_vdbg(hsotg->dev, "Queue Transactions\n");
3213#endif
3214 /* Process host channels associated with periodic transfers */
Douglas Anderson4e50e012016-01-28 18:20:03 -08003215 if (tr_type == DWC2_TRANSACTION_PERIODIC ||
3216 tr_type == DWC2_TRANSACTION_ALL)
Paul Zimmerman7359d482013-03-11 17:47:59 -07003217 dwc2_process_periodic_channels(hsotg);
3218
3219 /* Process host channels associated with non-periodic transfers */
3220 if (tr_type == DWC2_TRANSACTION_NON_PERIODIC ||
3221 tr_type == DWC2_TRANSACTION_ALL) {
3222 if (!list_empty(&hsotg->non_periodic_sched_active)) {
3223 dwc2_process_non_periodic_channels(hsotg);
3224 } else {
3225 /*
3226 * Ensure NP Tx FIFO empty interrupt is disabled when
3227 * there are no non-periodic transfers to process
3228 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003229 u32 gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003230
3231 gintmsk &= ~GINTSTS_NPTXFEMP;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003232 dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003233 }
3234 }
3235}
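/*
 * Usage sketch (illustrative, not part of this file): the SOF and Tx FIFO
 * empty interrupt handlers are expected to pick the next transaction type
 * and then call this function, roughly as follows; the exact call site is
 * an assumption based on the dwc2 interrupt code:
 *
 *	enum dwc2_transaction_type tr_type;
 *
 *	tr_type = dwc2_hcd_select_transactions(hsotg);
 *	if (tr_type != DWC2_TRANSACTION_NONE)
 *		dwc2_hcd_queue_transactions(hsotg, tr_type);
 */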
3236
3237static void dwc2_conn_id_status_change(struct work_struct *work)
3238{
3239 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
3240 wf_otg);
3241 u32 count = 0;
3242 u32 gotgctl;
Mian Yousaf Kaukab5390d432015-09-29 12:08:25 +02003243 unsigned long flags;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003244
3245 dev_dbg(hsotg->dev, "%s()\n", __func__);
3246
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003247 gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003248 dev_dbg(hsotg->dev, "gotgctl=%0x\n", gotgctl);
3249 dev_dbg(hsotg->dev, "gotgctl.b.conidsts=%d\n",
3250 !!(gotgctl & GOTGCTL_CONID_B));
3251
3252 /* B-Device connector (Device Mode) */
3253 if (gotgctl & GOTGCTL_CONID_B) {
3254 /* Wait for switch to device mode */
3255 dev_dbg(hsotg->dev, "connId B\n");
Chen Yu9156a7e2017-01-23 14:59:57 -08003256 if (hsotg->bus_suspended) {
3257 dev_info(hsotg->dev,
3258 "Do port resume before switching to device mode\n");
3259 dwc2_port_resume(hsotg);
3260 }
Paul Zimmerman7359d482013-03-11 17:47:59 -07003261 while (!dwc2_is_device_mode(hsotg)) {
3262 dev_info(hsotg->dev,
3263 "Waiting for Peripheral Mode, Mode=%s\n",
3264 dwc2_is_host_mode(hsotg) ? "Host" :
3265 "Peripheral");
Nicholas Mc Guire04a9db72017-01-12 16:54:03 +01003266 msleep(20);
John Stultzfc30c4b2017-01-23 14:59:35 -08003267 /*
3268 * Sometimes the initial GOTGCTL read is wrong, so
3269 * check it again and jump to host mode if that was
3270 * the case.
3271 */
3272 gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
3273 if (!(gotgctl & GOTGCTL_CONID_B))
3274 goto host;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003275 if (++count > 250)
3276 break;
3277 }
3278 if (count > 250)
3279 dev_err(hsotg->dev,
Paul Zimmermande9169a2013-04-22 14:00:17 -07003280 "Connection id status change timed out\n");
Paul Zimmerman7359d482013-03-11 17:47:59 -07003281 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
Douglas Anderson0fe239b2015-12-17 11:14:40 -08003282 dwc2_core_init(hsotg, false);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003283 dwc2_enable_global_interrupts(hsotg);
Mian Yousaf Kaukab5390d432015-09-29 12:08:25 +02003284 spin_lock_irqsave(&hsotg->lock, flags);
Felipe Balbi1f91b4c2015-08-06 18:11:54 -05003285 dwc2_hsotg_core_init_disconnected(hsotg, false);
Mian Yousaf Kaukab5390d432015-09-29 12:08:25 +02003286 spin_unlock_irqrestore(&hsotg->lock, flags);
Felipe Balbi1f91b4c2015-08-06 18:11:54 -05003287 dwc2_hsotg_core_connect(hsotg);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003288 } else {
John Stultzfc30c4b2017-01-23 14:59:35 -08003289host:
Paul Zimmerman7359d482013-03-11 17:47:59 -07003290 /* A-Device connector (Host Mode) */
3291 dev_dbg(hsotg->dev, "connId A\n");
3292 while (!dwc2_is_host_mode(hsotg)) {
3293 dev_info(hsotg->dev, "Waiting for Host Mode, Mode=%s\n",
3294 dwc2_is_host_mode(hsotg) ?
3295 "Host" : "Peripheral");
Nicholas Mc Guire04a9db72017-01-12 16:54:03 +01003296 msleep(20);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003297 if (++count > 250)
3298 break;
3299 }
3300 if (count > 250)
3301 dev_err(hsotg->dev,
Paul Zimmermande9169a2013-04-22 14:00:17 -07003302 "Connection id status change timed out\n");
Paul Zimmerman7359d482013-03-11 17:47:59 -07003303
John Stultzd2471d42017-10-23 14:32:48 -07003304 spin_lock_irqsave(&hsotg->lock, flags);
3305 dwc2_hsotg_disconnect(hsotg);
3306 spin_unlock_irqrestore(&hsotg->lock, flags);
3307
3308 hsotg->op_state = OTG_STATE_A_HOST;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003309 /* Initialize the Core for Host mode */
Douglas Anderson0fe239b2015-12-17 11:14:40 -08003310 dwc2_core_init(hsotg, false);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003311 dwc2_enable_global_interrupts(hsotg);
3312 dwc2_hcd_start(hsotg);
3313 }
3314}
3315
Kees Cooke99e88a2017-10-16 14:43:17 -07003316static void dwc2_wakeup_detected(struct timer_list *t)
Paul Zimmerman7359d482013-03-11 17:47:59 -07003317{
Kees Cooke99e88a2017-10-16 14:43:17 -07003318 struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003319 u32 hprt0;
3320
3321 dev_dbg(hsotg->dev, "%s()\n", __func__);
3322
3323 /*
3324 * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
3325 * so that OPT tests pass with all PHYs.)
3326 */
3327 hprt0 = dwc2_read_hprt0(hsotg);
3328 dev_dbg(hsotg->dev, "Resume: HPRT0=%0x\n", hprt0);
3329 hprt0 &= ~HPRT0_RES;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003330 dwc2_writel(hprt0, hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003331 dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003332 dwc2_readl(hsotg->regs + HPRT0));
Paul Zimmerman7359d482013-03-11 17:47:59 -07003333
3334 dwc2_hcd_rem_wakeup(hsotg);
Nicholas Mc Guirefdb09b32017-01-12 16:55:02 +01003335 hsotg->bus_suspended = false;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003336
3337 /* Change to L0 state */
3338 hsotg->lx_state = DWC2_L0;
3339}
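/*
 * Hedged example of how this timer is assumed to be armed: the port
 * interrupt handler that detects remote-wakeup signaling would typically
 * schedule this callback so that resume signaling is cleared roughly
 * 70 ms later, e.g.:
 *
 *	mod_timer(&hsotg->wkp_timer, jiffies + msecs_to_jiffies(71));
 */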
3340
3341static int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
3342{
3343 struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
3344
3345 return hcd->self.b_hnp_enable;
3346}
3347
3348/* Must NOT be called with interrupts disabled or the spinlock held */
3349static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
3350{
3351 unsigned long flags;
3352 u32 hprt0;
3353 u32 pcgctl;
3354 u32 gotgctl;
3355
3356 dev_dbg(hsotg->dev, "%s()\n", __func__);
3357
3358 spin_lock_irqsave(&hsotg->lock, flags);
3359
3360 if (windex == hsotg->otg_port && dwc2_host_is_b_hnp_enabled(hsotg)) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003361 gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003362 gotgctl |= GOTGCTL_HSTSETHNPEN;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003363 dwc2_writel(gotgctl, hsotg->regs + GOTGCTL);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003364 hsotg->op_state = OTG_STATE_A_SUSPEND;
3365 }
3366
3367 hprt0 = dwc2_read_hprt0(hsotg);
3368 hprt0 |= HPRT0_SUSP;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003369 dwc2_writel(hprt0, hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003370
Nicholas Mc Guirefdb09b32017-01-12 16:55:02 +01003371 hsotg->bus_suspended = true;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003372
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02003373 /*
3374 * If hibernation is supported, the PHY clock will be suspended
3375 * after the registers are backed up.
3376 */
John Younbea8e862016-11-03 17:55:53 -07003377 if (!hsotg->params.hibernation) {
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02003378 /* Suspend the Phy Clock */
3379 pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
3380 pcgctl |= PCGCTL_STOPPCLK;
3381 dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
3382 udelay(10);
3383 }
Paul Zimmerman7359d482013-03-11 17:47:59 -07003384
3385 /* For HNP the bus must be suspended for at least 200ms */
3386 if (dwc2_host_is_b_hnp_enabled(hsotg)) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003387 pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003388 pcgctl &= ~PCGCTL_STOPPCLK;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003389 dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003390
3391 spin_unlock_irqrestore(&hsotg->lock, flags);
3392
Nicholas Mc Guire04a9db72017-01-12 16:54:03 +01003393 msleep(200);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003394 } else {
3395 spin_unlock_irqrestore(&hsotg->lock, flags);
3396 }
3397}
3398
Gregory Herrero30db1032015-09-22 15:16:38 +02003399/* Must NOT be called with interrupts disabled or the spinlock held */
3400static void dwc2_port_resume(struct dwc2_hsotg *hsotg)
3401{
3402 unsigned long flags;
3403 u32 hprt0;
3404 u32 pcgctl;
3405
Douglas Anderson4d273c22015-10-14 15:58:27 -07003406 spin_lock_irqsave(&hsotg->lock, flags);
3407
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02003408 /*
3409 * If hibernation is supported, the PHY clock is already resumed
3410 * after the registers are restored.
3411 */
John Younbea8e862016-11-03 17:55:53 -07003412 if (!hsotg->params.hibernation) {
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02003413 pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
3414 pcgctl &= ~PCGCTL_STOPPCLK;
3415 dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
Douglas Anderson4d273c22015-10-14 15:58:27 -07003416 spin_unlock_irqrestore(&hsotg->lock, flags);
Nicholas Mc Guire04a9db72017-01-12 16:54:03 +01003417 msleep(20);
Douglas Anderson4d273c22015-10-14 15:58:27 -07003418 spin_lock_irqsave(&hsotg->lock, flags);
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02003419 }
Gregory Herrero30db1032015-09-22 15:16:38 +02003420
Gregory Herrero30db1032015-09-22 15:16:38 +02003421 hprt0 = dwc2_read_hprt0(hsotg);
3422 hprt0 |= HPRT0_RES;
3423 hprt0 &= ~HPRT0_SUSP;
3424 dwc2_writel(hprt0, hsotg->regs + HPRT0);
3425 spin_unlock_irqrestore(&hsotg->lock, flags);
3426
3427 msleep(USB_RESUME_TIMEOUT);
3428
3429 spin_lock_irqsave(&hsotg->lock, flags);
3430 hprt0 = dwc2_read_hprt0(hsotg);
3431 hprt0 &= ~(HPRT0_RES | HPRT0_SUSP);
3432 dwc2_writel(hprt0, hsotg->regs + HPRT0);
Nicholas Mc Guirefdb09b32017-01-12 16:55:02 +01003433 hsotg->bus_suspended = false;
Gregory Herrero30db1032015-09-22 15:16:38 +02003434 spin_unlock_irqrestore(&hsotg->lock, flags);
3435}
3436
Paul Zimmerman7359d482013-03-11 17:47:59 -07003437/* Handles hub class-specific requests */
3438static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
3439 u16 wvalue, u16 windex, char *buf, u16 wlength)
3440{
3441 struct usb_hub_descriptor *hub_desc;
3442 int retval = 0;
3443 u32 hprt0;
3444 u32 port_status;
3445 u32 speed;
3446 u32 pcgctl;
3447
3448 switch (typereq) {
3449 case ClearHubFeature:
3450 dev_dbg(hsotg->dev, "ClearHubFeature %1xh\n", wvalue);
3451
3452 switch (wvalue) {
3453 case C_HUB_LOCAL_POWER:
3454 case C_HUB_OVER_CURRENT:
3455 /* Nothing required here */
3456 break;
3457
3458 default:
3459 retval = -EINVAL;
3460 dev_err(hsotg->dev,
3461 "ClearHubFeature request %1xh unknown\n",
3462 wvalue);
3463 }
3464 break;
3465
3466 case ClearPortFeature:
3467 if (wvalue != USB_PORT_FEAT_L1)
3468 if (!windex || windex > 1)
3469 goto error;
3470 switch (wvalue) {
3471 case USB_PORT_FEAT_ENABLE:
3472 dev_dbg(hsotg->dev,
3473 "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
3474 hprt0 = dwc2_read_hprt0(hsotg);
3475 hprt0 |= HPRT0_ENA;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003476 dwc2_writel(hprt0, hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003477 break;
3478
3479 case USB_PORT_FEAT_SUSPEND:
3480 dev_dbg(hsotg->dev,
3481 "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
Paul Zimmermanb0bb9bb2015-01-15 19:21:46 +00003482
Gregory Herrerobea78552015-09-22 15:16:44 +02003483 if (hsotg->bus_suspended)
3484 dwc2_port_resume(hsotg);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003485 break;
3486
3487 case USB_PORT_FEAT_POWER:
3488 dev_dbg(hsotg->dev,
3489 "ClearPortFeature USB_PORT_FEAT_POWER\n");
3490 hprt0 = dwc2_read_hprt0(hsotg);
3491 hprt0 &= ~HPRT0_PWR;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003492 dwc2_writel(hprt0, hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003493 break;
3494
3495 case USB_PORT_FEAT_INDICATOR:
3496 dev_dbg(hsotg->dev,
3497 "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
3498 /* Port indicator not supported */
3499 break;
3500
3501 case USB_PORT_FEAT_C_CONNECTION:
3502 /*
3503 * Clears driver's internal Connect Status Change flag
3504 */
3505 dev_dbg(hsotg->dev,
3506 "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
3507 hsotg->flags.b.port_connect_status_change = 0;
3508 break;
3509
3510 case USB_PORT_FEAT_C_RESET:
3511 /* Clears driver's internal Port Reset Change flag */
3512 dev_dbg(hsotg->dev,
3513 "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
3514 hsotg->flags.b.port_reset_change = 0;
3515 break;
3516
3517 case USB_PORT_FEAT_C_ENABLE:
3518 /*
3519 * Clears the driver's internal Port Enable/Disable
3520 * Change flag
3521 */
3522 dev_dbg(hsotg->dev,
3523 "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
3524 hsotg->flags.b.port_enable_change = 0;
3525 break;
3526
3527 case USB_PORT_FEAT_C_SUSPEND:
3528 /*
3529 * Clears the driver's internal Port Suspend Change
3530 * flag, which is set when resume signaling on the host
3531 * port is complete
3532 */
3533 dev_dbg(hsotg->dev,
3534 "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
3535 hsotg->flags.b.port_suspend_change = 0;
3536 break;
3537
3538 case USB_PORT_FEAT_C_PORT_L1:
3539 dev_dbg(hsotg->dev,
3540 "ClearPortFeature USB_PORT_FEAT_C_PORT_L1\n");
3541 hsotg->flags.b.port_l1_change = 0;
3542 break;
3543
3544 case USB_PORT_FEAT_C_OVER_CURRENT:
3545 dev_dbg(hsotg->dev,
3546 "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
3547 hsotg->flags.b.port_over_current_change = 0;
3548 break;
3549
3550 default:
3551 retval = -EINVAL;
3552 dev_err(hsotg->dev,
3553 "ClearPortFeature request %1xh unknown or unsupported\n",
3554 wvalue);
3555 }
3556 break;
3557
3558 case GetHubDescriptor:
3559 dev_dbg(hsotg->dev, "GetHubDescriptor\n");
3560 hub_desc = (struct usb_hub_descriptor *)buf;
3561 hub_desc->bDescLength = 9;
Sergei Shtylyova5dd0392015-03-29 01:36:28 +03003562 hub_desc->bDescriptorType = USB_DT_HUB;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003563 hub_desc->bNbrPorts = 1;
Sergei Shtylyov3d040de2015-01-19 01:54:15 +03003564 hub_desc->wHubCharacteristics =
3565 cpu_to_le16(HUB_CHAR_COMMON_LPSM |
3566 HUB_CHAR_INDV_PORT_OCPM);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003567 hub_desc->bPwrOn2PwrGood = 1;
3568 hub_desc->bHubContrCurrent = 0;
3569 hub_desc->u.hs.DeviceRemovable[0] = 0;
3570 hub_desc->u.hs.DeviceRemovable[1] = 0xff;
3571 break;
3572
3573 case GetHubStatus:
3574 dev_dbg(hsotg->dev, "GetHubStatus\n");
3575 memset(buf, 0, 4);
3576 break;
3577
3578 case GetPortStatus:
Paul Zimmermanb8313412013-05-24 16:32:12 -07003579 dev_vdbg(hsotg->dev,
3580 "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex,
3581 hsotg->flags.d32);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003582 if (!windex || windex > 1)
3583 goto error;
3584
3585 port_status = 0;
3586 if (hsotg->flags.b.port_connect_status_change)
3587 port_status |= USB_PORT_STAT_C_CONNECTION << 16;
3588 if (hsotg->flags.b.port_enable_change)
3589 port_status |= USB_PORT_STAT_C_ENABLE << 16;
3590 if (hsotg->flags.b.port_suspend_change)
3591 port_status |= USB_PORT_STAT_C_SUSPEND << 16;
3592 if (hsotg->flags.b.port_l1_change)
3593 port_status |= USB_PORT_STAT_C_L1 << 16;
3594 if (hsotg->flags.b.port_reset_change)
3595 port_status |= USB_PORT_STAT_C_RESET << 16;
3596 if (hsotg->flags.b.port_over_current_change) {
3597 dev_warn(hsotg->dev, "Overcurrent change detected\n");
3598 port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
3599 }
3600
3601 if (!hsotg->flags.b.port_connect_status) {
3602 /*
3603 * The port is disconnected, which means the core is
3604 * either in device mode or it soon will be. Just
3605 * return 0's for the remainder of the port status
3606 * since the port register can't be read if the core
3607 * is in device mode.
3608 */
3609 *(__le32 *)buf = cpu_to_le32(port_status);
3610 break;
3611 }
3612
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003613 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
Paul Zimmermanb8313412013-05-24 16:32:12 -07003614 dev_vdbg(hsotg->dev, " HPRT0: 0x%08x\n", hprt0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003615
3616 if (hprt0 & HPRT0_CONNSTS)
3617 port_status |= USB_PORT_STAT_CONNECTION;
3618 if (hprt0 & HPRT0_ENA)
3619 port_status |= USB_PORT_STAT_ENABLE;
3620 if (hprt0 & HPRT0_SUSP)
3621 port_status |= USB_PORT_STAT_SUSPEND;
3622 if (hprt0 & HPRT0_OVRCURRACT)
3623 port_status |= USB_PORT_STAT_OVERCURRENT;
3624 if (hprt0 & HPRT0_RST)
3625 port_status |= USB_PORT_STAT_RESET;
3626 if (hprt0 & HPRT0_PWR)
3627 port_status |= USB_PORT_STAT_POWER;
3628
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02003629 speed = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003630 if (speed == HPRT0_SPD_HIGH_SPEED)
3631 port_status |= USB_PORT_STAT_HIGH_SPEED;
3632 else if (speed == HPRT0_SPD_LOW_SPEED)
3633 port_status |= USB_PORT_STAT_LOW_SPEED;
3634
3635 if (hprt0 & HPRT0_TSTCTL_MASK)
3636 port_status |= USB_PORT_STAT_TEST;
3637 /* USB_PORT_FEAT_INDICATOR unsupported, always 0 */
3638
John Younbea8e862016-11-03 17:55:53 -07003639 if (hsotg->params.dma_desc_fs_enable) {
Mian Yousaf Kaukabfbb9e222015-11-20 11:49:28 +01003640 /*
3641 * Enable descriptor DMA only if a full speed
3642 * device is connected.
3643 */
3644 if (hsotg->new_connection &&
3645 ((port_status &
3646 (USB_PORT_STAT_CONNECTION |
3647 USB_PORT_STAT_HIGH_SPEED |
3648 USB_PORT_STAT_LOW_SPEED)) ==
3649 USB_PORT_STAT_CONNECTION)) {
3650 u32 hcfg;
3651
3652 dev_info(hsotg->dev, "Enabling descriptor DMA mode\n");
John Youn95832c02017-01-23 14:57:26 -08003653 hsotg->params.dma_desc_enable = true;
Mian Yousaf Kaukabfbb9e222015-11-20 11:49:28 +01003654 hcfg = dwc2_readl(hsotg->regs + HCFG);
3655 hcfg |= HCFG_DESCDMA;
3656 dwc2_writel(hcfg, hsotg->regs + HCFG);
3657 hsotg->new_connection = false;
3658 }
3659 }
3660
Paul Zimmermanb8313412013-05-24 16:32:12 -07003661 dev_vdbg(hsotg->dev, "port_status=%08x\n", port_status);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003662 *(__le32 *)buf = cpu_to_le32(port_status);
3663 break;
3664
3665 case SetHubFeature:
3666 dev_dbg(hsotg->dev, "SetHubFeature\n");
3667 /* No HUB features supported */
3668 break;
3669
3670 case SetPortFeature:
3671 dev_dbg(hsotg->dev, "SetPortFeature\n");
3672 if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
3673 goto error;
3674
3675 if (!hsotg->flags.b.port_connect_status) {
3676 /*
3677 * The port is disconnected, which means the core is
3678 * either in device mode or it soon will be. Just
3679 * return without doing anything since the port
3680 * register can't be written if the core is in device
3681 * mode.
3682 */
3683 break;
3684 }
3685
3686 switch (wvalue) {
3687 case USB_PORT_FEAT_SUSPEND:
3688 dev_dbg(hsotg->dev,
3689 "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
3690 if (windex != hsotg->otg_port)
3691 goto error;
3692 dwc2_port_suspend(hsotg, windex);
3693 break;
3694
3695 case USB_PORT_FEAT_POWER:
3696 dev_dbg(hsotg->dev,
3697 "SetPortFeature - USB_PORT_FEAT_POWER\n");
3698 hprt0 = dwc2_read_hprt0(hsotg);
3699 hprt0 |= HPRT0_PWR;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003700 dwc2_writel(hprt0, hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003701 break;
3702
3703 case USB_PORT_FEAT_RESET:
3704 hprt0 = dwc2_read_hprt0(hsotg);
3705 dev_dbg(hsotg->dev,
3706 "SetPortFeature - USB_PORT_FEAT_RESET\n");
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003707 pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003708 pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK);
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003709 dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003710 /* ??? Original driver does this */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003711 dwc2_writel(0, hsotg->regs + PCGCTL);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003712
3713 hprt0 = dwc2_read_hprt0(hsotg);
3714 /* Clear suspend bit if resetting from suspend state */
3715 hprt0 &= ~HPRT0_SUSP;
3716
3717 /*
3718 * When acting as B-Host, the port reset bit is set in the Start
3719 * HCD callback function, so that the reset is started
3720 * within 1 ms of the HNP success interrupt
3721 */
3722 if (!dwc2_hcd_is_b_host(hsotg)) {
3723 hprt0 |= HPRT0_PWR | HPRT0_RST;
3724 dev_dbg(hsotg->dev,
3725 "In host mode, hprt0=%08x\n", hprt0);
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003726 dwc2_writel(hprt0, hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003727 }
3728
3729 /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
Nicholas Mc Guire04a9db72017-01-12 16:54:03 +01003730 msleep(50);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003731 hprt0 &= ~HPRT0_RST;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003732 dwc2_writel(hprt0, hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003733 hsotg->lx_state = DWC2_L0; /* Now back to On state */
3734 break;
3735
3736 case USB_PORT_FEAT_INDICATOR:
3737 dev_dbg(hsotg->dev,
3738 "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
3739 /* Not supported */
3740 break;
3741
Jingwu Lin96d480e2015-04-29 22:09:17 +02003742 case USB_PORT_FEAT_TEST:
3743 hprt0 = dwc2_read_hprt0(hsotg);
3744 dev_dbg(hsotg->dev,
3745 "SetPortFeature - USB_PORT_FEAT_TEST\n");
3746 hprt0 &= ~HPRT0_TSTCTL_MASK;
3747 hprt0 |= (windex >> 8) << HPRT0_TSTCTL_SHIFT;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003748 dwc2_writel(hprt0, hsotg->regs + HPRT0);
Jingwu Lin96d480e2015-04-29 22:09:17 +02003749 break;
3750
Paul Zimmerman7359d482013-03-11 17:47:59 -07003751 default:
3752 retval = -EINVAL;
3753 dev_err(hsotg->dev,
3754 "SetPortFeature %1xh unknown or unsupported\n",
3755 wvalue);
3756 break;
3757 }
3758 break;
3759
3760 default:
3761error:
3762 retval = -EINVAL;
3763 dev_dbg(hsotg->dev,
3764 "Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n",
3765 typereq, windex, wvalue);
3766 break;
3767 }
3768
3769 return retval;
3770}
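/*
 * Illustrative example only: the hub_control op of the Linux HCD glue
 * forwards root-hub class requests here, so reading the status of port 1
 * would look roughly like the following (the 4-byte wlength is an
 * assumption based on the standard GetPortStatus request):
 *
 *	char buf[4];
 *	int ret = dwc2_hcd_hub_control(hsotg, GetPortStatus, 0, 1, buf, 4);
 */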
3771
3772static int dwc2_hcd_is_status_changed(struct dwc2_hsotg *hsotg, int port)
3773{
3774 int retval;
3775
Paul Zimmerman7359d482013-03-11 17:47:59 -07003776 if (port != 1)
3777 return -EINVAL;
3778
3779 retval = (hsotg->flags.b.port_connect_status_change ||
3780 hsotg->flags.b.port_reset_change ||
3781 hsotg->flags.b.port_enable_change ||
3782 hsotg->flags.b.port_suspend_change ||
3783 hsotg->flags.b.port_over_current_change);
3784
3785 if (retval) {
3786 dev_dbg(hsotg->dev,
3787 "DWC OTG HCD HUB STATUS DATA: Root port status changed\n");
3788 dev_dbg(hsotg->dev, " port_connect_status_change: %d\n",
3789 hsotg->flags.b.port_connect_status_change);
3790 dev_dbg(hsotg->dev, " port_reset_change: %d\n",
3791 hsotg->flags.b.port_reset_change);
3792 dev_dbg(hsotg->dev, " port_enable_change: %d\n",
3793 hsotg->flags.b.port_enable_change);
3794 dev_dbg(hsotg->dev, " port_suspend_change: %d\n",
3795 hsotg->flags.b.port_suspend_change);
3796 dev_dbg(hsotg->dev, " port_over_current_change: %d\n",
3797 hsotg->flags.b.port_over_current_change);
3798 }
3799
3800 return retval;
3801}
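/*
 * Sketch of the assumed caller, following the usual hub_status_data
 * convention where bit 1 of the change bitmap reports a change on port 1:
 *
 *	buf[0] = dwc2_hcd_is_status_changed(hsotg, 1) ? (1 << 1) : 0;
 *	return buf[0] != 0;
 */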
3802
3803int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
3804{
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003805 u32 hfnum = dwc2_readl(hsotg->regs + HFNUM);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003806
3807#ifdef DWC2_DEBUG_SOF
3808 dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02003809 (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003810#endif
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02003811 return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003812}
3813
Douglas Andersonfae4e822016-01-28 18:20:10 -08003814int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us)
3815{
3816 u32 hprt = dwc2_readl(hsotg->regs + HPRT0);
3817 u32 hfir = dwc2_readl(hsotg->regs + HFIR);
3818 u32 hfnum = dwc2_readl(hsotg->regs + HFNUM);
3819 unsigned int us_per_frame;
3820 unsigned int frame_number;
3821 unsigned int remaining;
3822 unsigned int interval;
3823 unsigned int phy_clks;
3824
3825 /* High speed has 125 us per (micro)frame; others have 1 ms per frame */
3826 us_per_frame = (hprt & HPRT0_SPD_MASK) ? 1000 : 125;
3827
3828 /* Extract fields */
3829 frame_number = (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
3830 remaining = (hfnum & HFNUM_FRREM_MASK) >> HFNUM_FRREM_SHIFT;
3831 interval = (hfir & HFIR_FRINT_MASK) >> HFIR_FRINT_SHIFT;
3832
3833 /*
3834 * Number of phy clocks since the last tick of the frame number after
3835 * "us" has passed.
3836 */
3837 phy_clks = (interval - remaining) +
3838 DIV_ROUND_UP(interval * us, us_per_frame);
3839
3840 return dwc2_frame_num_inc(frame_number, phy_clks / interval);
3841}
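/*
 * Worked example of the calculation above (the numbers are illustrative,
 * not taken from real hardware): on a high-speed port us_per_frame is 125.
 * With interval = 3750 PHY clocks, remaining = 1000 and us = 250:
 *
 *	phy_clks = (3750 - 1000) + DIV_ROUND_UP(3750 * 250, 125)
 *	         = 2750 + 7500 = 10250
 *
 * and 10250 / 3750 = 2, so the current frame number is advanced by two.
 */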
3842
Paul Zimmerman7359d482013-03-11 17:47:59 -07003843int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
3844{
Aldo Iljazi6bf2e2a2013-11-30 19:33:57 +02003845 return hsotg->op_state == OTG_STATE_B_HOST;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003846}
3847
3848static struct dwc2_hcd_urb *dwc2_hcd_urb_alloc(struct dwc2_hsotg *hsotg,
3849 int iso_desc_count,
3850 gfp_t mem_flags)
3851{
3852 struct dwc2_hcd_urb *urb;
3853 u32 size = sizeof(*urb) + iso_desc_count *
3854 sizeof(struct dwc2_hcd_iso_packet_desc);
3855
3856 urb = kzalloc(size, mem_flags);
3857 if (urb)
3858 urb->packet_count = iso_desc_count;
3859 return urb;
3860}
3861
3862static void dwc2_hcd_urb_set_pipeinfo(struct dwc2_hsotg *hsotg,
3863 struct dwc2_hcd_urb *urb, u8 dev_addr,
3864 u8 ep_num, u8 ep_type, u8 ep_dir, u16 mps)
3865{
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02003866 if (dbg_perio() ||
3867 ep_type == USB_ENDPOINT_XFER_BULK ||
3868 ep_type == USB_ENDPOINT_XFER_CONTROL)
3869 dev_vdbg(hsotg->dev,
3870 "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n",
3871 dev_addr, ep_num, ep_dir, ep_type, mps);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003872 urb->pipe_info.dev_addr = dev_addr;
3873 urb->pipe_info.ep_num = ep_num;
3874 urb->pipe_info.pipe_type = ep_type;
3875 urb->pipe_info.pipe_dir = ep_dir;
3876 urb->pipe_info.mps = mps;
3877}
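/*
 * Illustrative pairing of the two helpers above, roughly as the URB
 * enqueue path is expected to use them (a sketch under that assumption,
 * with ep_type already derived from the URB's pipe):
 *
 *	dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
 *				      mem_flags);
 *	if (!dwc2_urb)
 *		return -ENOMEM;
 *
 *	dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
 *				  usb_pipeendpoint(urb->pipe), ep_type,
 *				  usb_pipein(urb->pipe),
 *				  usb_maxpacket(urb->dev, urb->pipe,
 *						!usb_pipein(urb->pipe)));
 */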
3878
3879/*
3880 * NOTE: This function will be removed once the peripheral controller code
3881 * is integrated and the driver is stable
3882 */
3883void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
3884{
3885#ifdef DEBUG
3886 struct dwc2_host_chan *chan;
3887 struct dwc2_hcd_urb *urb;
3888 struct dwc2_qtd *qtd;
3889 int num_channels;
3890 u32 np_tx_status;
3891 u32 p_tx_status;
3892 int i;
3893
John Younbea8e862016-11-03 17:55:53 -07003894 num_channels = hsotg->params.host_channels;
Paul Zimmerman7359d482013-03-11 17:47:59 -07003895 dev_dbg(hsotg->dev, "\n");
3896 dev_dbg(hsotg->dev,
3897 "************************************************************\n");
3898 dev_dbg(hsotg->dev, "HCD State:\n");
3899 dev_dbg(hsotg->dev, " Num channels: %d\n", num_channels);
3900
3901 for (i = 0; i < num_channels; i++) {
3902 chan = hsotg->hc_ptr_array[i];
3903 dev_dbg(hsotg->dev, " Channel %d:\n", i);
3904 dev_dbg(hsotg->dev,
3905 " dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
3906 chan->dev_addr, chan->ep_num, chan->ep_is_in);
3907 dev_dbg(hsotg->dev, " speed: %d\n", chan->speed);
3908 dev_dbg(hsotg->dev, " ep_type: %d\n", chan->ep_type);
3909 dev_dbg(hsotg->dev, " max_packet: %d\n", chan->max_packet);
3910 dev_dbg(hsotg->dev, " data_pid_start: %d\n",
3911 chan->data_pid_start);
3912 dev_dbg(hsotg->dev, " multi_count: %d\n", chan->multi_count);
3913 dev_dbg(hsotg->dev, " xfer_started: %d\n",
3914 chan->xfer_started);
3915 dev_dbg(hsotg->dev, " xfer_buf: %p\n", chan->xfer_buf);
3916 dev_dbg(hsotg->dev, " xfer_dma: %08lx\n",
3917 (unsigned long)chan->xfer_dma);
3918 dev_dbg(hsotg->dev, " xfer_len: %d\n", chan->xfer_len);
3919 dev_dbg(hsotg->dev, " xfer_count: %d\n", chan->xfer_count);
3920 dev_dbg(hsotg->dev, " halt_on_queue: %d\n",
3921 chan->halt_on_queue);
3922 dev_dbg(hsotg->dev, " halt_pending: %d\n",
3923 chan->halt_pending);
3924 dev_dbg(hsotg->dev, " halt_status: %d\n", chan->halt_status);
3925 dev_dbg(hsotg->dev, " do_split: %d\n", chan->do_split);
3926 dev_dbg(hsotg->dev, " complete_split: %d\n",
3927 chan->complete_split);
3928 dev_dbg(hsotg->dev, " hub_addr: %d\n", chan->hub_addr);
3929 dev_dbg(hsotg->dev, " hub_port: %d\n", chan->hub_port);
3930 dev_dbg(hsotg->dev, " xact_pos: %d\n", chan->xact_pos);
3931 dev_dbg(hsotg->dev, " requests: %d\n", chan->requests);
3932 dev_dbg(hsotg->dev, " qh: %p\n", chan->qh);
3933
3934 if (chan->xfer_started) {
3935 u32 hfnum, hcchar, hctsiz, hcint, hcintmsk;
3936
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003937 hfnum = dwc2_readl(hsotg->regs + HFNUM);
3938 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
3939 hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(i));
3940 hcint = dwc2_readl(hsotg->regs + HCINT(i));
3941 hcintmsk = dwc2_readl(hsotg->regs + HCINTMSK(i));
Paul Zimmerman7359d482013-03-11 17:47:59 -07003942 dev_dbg(hsotg->dev, " hfnum: 0x%08x\n", hfnum);
3943 dev_dbg(hsotg->dev, " hcchar: 0x%08x\n", hcchar);
3944 dev_dbg(hsotg->dev, " hctsiz: 0x%08x\n", hctsiz);
3945 dev_dbg(hsotg->dev, " hcint: 0x%08x\n", hcint);
3946 dev_dbg(hsotg->dev, " hcintmsk: 0x%08x\n", hcintmsk);
3947 }
3948
3949 if (!(chan->xfer_started && chan->qh))
3950 continue;
3951
3952 list_for_each_entry(qtd, &chan->qh->qtd_list, qtd_list_entry) {
3953 if (!qtd->in_process)
3954 break;
3955 urb = qtd->urb;
3956 dev_dbg(hsotg->dev, " URB Info:\n");
3957 dev_dbg(hsotg->dev, " qtd: %p, urb: %p\n",
3958 qtd, urb);
3959 if (urb) {
3960 dev_dbg(hsotg->dev,
3961 " Dev: %d, EP: %d %s\n",
3962 dwc2_hcd_get_dev_addr(&urb->pipe_info),
3963 dwc2_hcd_get_ep_num(&urb->pipe_info),
3964 dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
3965 "IN" : "OUT");
3966 dev_dbg(hsotg->dev,
3967 " Max packet size: %d\n",
3968 dwc2_hcd_get_mps(&urb->pipe_info));
3969 dev_dbg(hsotg->dev,
3970 " transfer_buffer: %p\n",
3971 urb->buf);
Paul Zimmerman157dfaa2013-03-14 13:12:00 -07003972 dev_dbg(hsotg->dev,
3973 " transfer_dma: %08lx\n",
3974 (unsigned long)urb->dma);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003975 dev_dbg(hsotg->dev,
3976 " transfer_buffer_length: %d\n",
3977 urb->length);
3978 dev_dbg(hsotg->dev, " actual_length: %d\n",
3979 urb->actual_length);
3980 }
3981 }
3982 }
3983
3984 dev_dbg(hsotg->dev, " non_periodic_channels: %d\n",
3985 hsotg->non_periodic_channels);
3986 dev_dbg(hsotg->dev, " periodic_channels: %d\n",
3987 hsotg->periodic_channels);
3988 dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs);
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003989 np_tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003990 dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02003991 (np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003992 dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02003993 (np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003994 p_tx_status = dwc2_readl(hsotg->regs + HPTXSTS);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003995 dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02003996 (p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003997 dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02003998 (p_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
Paul Zimmerman7359d482013-03-11 17:47:59 -07003999 dwc2_hcd_dump_frrem(hsotg);
4000 dwc2_dump_global_registers(hsotg);
4001 dwc2_dump_host_registers(hsotg);
4002 dev_dbg(hsotg->dev,
4003 "************************************************************\n");
4004 dev_dbg(hsotg->dev, "\n");
4005#endif
4006}
4007
4008/*
4009 * NOTE: This function will be removed once the peripheral controller code
4010 * is integrated and the driver is stable
4011 */
4012void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg)
4013{
4014#ifdef DWC2_DUMP_FRREM
4015 dev_dbg(hsotg->dev, "Frame remaining at SOF:\n");
4016 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
4017 hsotg->frrem_samples, hsotg->frrem_accum,
4018 hsotg->frrem_samples > 0 ?
4019 hsotg->frrem_accum / hsotg->frrem_samples : 0);
4020 dev_dbg(hsotg->dev, "\n");
4021 dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 7):\n");
4022 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
4023 hsotg->hfnum_7_samples,
4024 hsotg->hfnum_7_frrem_accum,
4025 hsotg->hfnum_7_samples > 0 ?
4026 hsotg->hfnum_7_frrem_accum / hsotg->hfnum_7_samples : 0);
4027 dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 0):\n");
4028 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
4029 hsotg->hfnum_0_samples,
4030 hsotg->hfnum_0_frrem_accum,
4031 hsotg->hfnum_0_samples > 0 ?
4032 hsotg->hfnum_0_frrem_accum / hsotg->hfnum_0_samples : 0);
4033 dev_dbg(hsotg->dev, "Frame remaining at start_transfer (uframe 1-6):\n");
4034 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
4035 hsotg->hfnum_other_samples,
4036 hsotg->hfnum_other_frrem_accum,
4037 hsotg->hfnum_other_samples > 0 ?
4038 hsotg->hfnum_other_frrem_accum / hsotg->hfnum_other_samples :
4039 0);
4040 dev_dbg(hsotg->dev, "\n");
4041 dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 7):\n");
4042 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
4043 hsotg->hfnum_7_samples_a, hsotg->hfnum_7_frrem_accum_a,
4044 hsotg->hfnum_7_samples_a > 0 ?
4045 hsotg->hfnum_7_frrem_accum_a / hsotg->hfnum_7_samples_a : 0);
4046 dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 0):\n");
4047 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
4048 hsotg->hfnum_0_samples_a, hsotg->hfnum_0_frrem_accum_a,
4049 hsotg->hfnum_0_samples_a > 0 ?
4050 hsotg->hfnum_0_frrem_accum_a / hsotg->hfnum_0_samples_a : 0);
4051 dev_dbg(hsotg->dev, "Frame remaining at sample point A (uframe 1-6):\n");
4052 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
4053 hsotg->hfnum_other_samples_a, hsotg->hfnum_other_frrem_accum_a,
4054 hsotg->hfnum_other_samples_a > 0 ?
4055 hsotg->hfnum_other_frrem_accum_a / hsotg->hfnum_other_samples_a
4056 : 0);
4057 dev_dbg(hsotg->dev, "\n");
4058 dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 7):\n");
4059 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
4060 hsotg->hfnum_7_samples_b, hsotg->hfnum_7_frrem_accum_b,
4061 hsotg->hfnum_7_samples_b > 0 ?
4062 hsotg->hfnum_7_frrem_accum_b / hsotg->hfnum_7_samples_b : 0);
4063 dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 0):\n");
4064 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
4065 hsotg->hfnum_0_samples_b, hsotg->hfnum_0_frrem_accum_b,
4066 (hsotg->hfnum_0_samples_b > 0) ?
4067 hsotg->hfnum_0_frrem_accum_b / hsotg->hfnum_0_samples_b : 0);
4068 dev_dbg(hsotg->dev, "Frame remaining at sample point B (uframe 1-6):\n");
4069 dev_dbg(hsotg->dev, " samples %u, accum %llu, avg %llu\n",
4070 hsotg->hfnum_other_samples_b, hsotg->hfnum_other_frrem_accum_b,
4071 (hsotg->hfnum_other_samples_b > 0) ?
4072 hsotg->hfnum_other_frrem_accum_b / hsotg->hfnum_other_samples_b
4073 : 0);
4074#endif
4075}
4076
4077struct wrapper_priv_data {
4078 struct dwc2_hsotg *hsotg;
4079};
4080
4081/* Gets the dwc2_hsotg from a usb_hcd */
4082static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd)
4083{
4084 struct wrapper_priv_data *p;
4085
John Youn9da51972017-01-17 20:30:27 -08004086 p = (struct wrapper_priv_data *)&hcd->hcd_priv;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004087 return p->hsotg;
4088}
4089
Douglas Anderson9f9f09b2016-01-28 18:20:12 -08004090/**
4091 * dwc2_host_get_tt_info() - Get the dwc2_tt associated with context
4092 *
4093 * This will get the dwc2_tt structure (and ttport) associated with the given
4094 * context (which is really just a struct urb pointer).
4095 *
4096 * The first time this is called for a given TT we allocate memory for our
4097 * structure. When everyone is done and has called dwc2_host_put_tt_info()
4098 * then the refcount for the structure will go to 0 and we'll free it.
4099 *
4100 * @hsotg: The HCD state structure for the DWC OTG controller.
4102 * @context: The priv pointer from a struct dwc2_hcd_urb.
4103 * @mem_flags: Flags for allocating memory.
4104 * @ttport: We'll return this device's port number here. That's used to
4105 * reference into the bitmap if we're on a multi_tt hub.
4106 *
4107 * Return: a pointer to a struct dwc2_tt. Don't forget to call
4108 * dwc2_host_put_tt_info()! Returns NULL upon memory alloc failure.
4109 */
4110
4111struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context,
4112 gfp_t mem_flags, int *ttport)
4113{
4114 struct urb *urb = context;
4115 struct dwc2_tt *dwc_tt = NULL;
4116
4117 if (urb->dev->tt) {
4118 *ttport = urb->dev->ttport;
4119
4120 dwc_tt = urb->dev->tt->hcpriv;
John Youn9da51972017-01-17 20:30:27 -08004121 if (!dwc_tt) {
Douglas Anderson9f9f09b2016-01-28 18:20:12 -08004122 size_t bitmap_size;
4123
4124 /*
4125 * For single_tt we need one schedule. For multi_tt
4126 * we need one per port.
4127 */
4128 bitmap_size = DWC2_ELEMENTS_PER_LS_BITMAP *
4129 sizeof(dwc_tt->periodic_bitmaps[0]);
4130 if (urb->dev->tt->multi)
4131 bitmap_size *= urb->dev->tt->hub->maxchild;
4132
4133 dwc_tt = kzalloc(sizeof(*dwc_tt) + bitmap_size,
4134 mem_flags);
John Youn9da51972017-01-17 20:30:27 -08004135 if (!dwc_tt)
Douglas Anderson9f9f09b2016-01-28 18:20:12 -08004136 return NULL;
4137
4138 dwc_tt->usb_tt = urb->dev->tt;
4139 dwc_tt->usb_tt->hcpriv = dwc_tt;
4140 }
4141
4142 dwc_tt->refcount++;
4143 }
4144
4145 return dwc_tt;
4146}
4147
4148/**
4149 * dwc2_host_put_tt_info() - Put the dwc2_tt from dwc2_host_get_tt_info()
4150 *
4151 * Frees resources allocated by dwc2_host_get_tt_info() if all current holders
4152 * of the structure are done.
4153 *
4154 * It's OK to call this with NULL.
4155 *
4156 * @hsotg: The HCD state structure for the DWC OTG controller.
4157 * @dwc_tt: The pointer returned by dwc2_host_get_tt_info.
4158 */
4159void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, struct dwc2_tt *dwc_tt)
4160{
4161 /* Model kfree and make put of NULL a no-op */
John Youn9da51972017-01-17 20:30:27 -08004162 if (!dwc_tt)
Douglas Anderson9f9f09b2016-01-28 18:20:12 -08004163 return;
4164
4165 WARN_ON(dwc_tt->refcount < 1);
4166
4167 dwc_tt->refcount--;
4168 if (!dwc_tt->refcount) {
4169 dwc_tt->usb_tt->hcpriv = NULL;
4170 kfree(dwc_tt);
4171 }
4172}
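/*
 * Sketch of the intended get/put pairing (the caller shown is an
 * assumption; typically this is done when a QH is created and freed):
 *
 *	dwc_tt = dwc2_host_get_tt_info(hsotg, context, mem_flags, &ttport);
 *	...
 *	dwc2_host_put_tt_info(hsotg, dwc_tt);
 */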
4173
Paul Zimmerman7359d482013-03-11 17:47:59 -07004174int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
4175{
4176 struct urb *urb = context;
4177
4178 return urb->dev->speed;
4179}
4180
4181static void dwc2_allocate_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
4182 struct urb *urb)
4183{
4184 struct usb_bus *bus = hcd_to_bus(hcd);
4185
4186 if (urb->interval)
4187 bus->bandwidth_allocated += bw / urb->interval;
4188 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
4189 bus->bandwidth_isoc_reqs++;
4190 else
4191 bus->bandwidth_int_reqs++;
4192}
4193
4194static void dwc2_free_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
4195 struct urb *urb)
4196{
4197 struct usb_bus *bus = hcd_to_bus(hcd);
4198
4199 if (urb->interval)
4200 bus->bandwidth_allocated -= bw / urb->interval;
4201 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
4202 bus->bandwidth_isoc_reqs--;
4203 else
4204 bus->bandwidth_int_reqs--;
4205}
4206
4207/*
4208 * Sets the final status of an URB and returns it to the upper layer. Any
4209 * required cleanup of the URB is performed.
4210 *
4211 * Must be called with interrupts disabled and the spinlock held
4212 */
Paul Zimmerman0d012b92013-07-13 14:53:48 -07004213void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
4214 int status)
Paul Zimmerman7359d482013-03-11 17:47:59 -07004215{
Paul Zimmerman0d012b92013-07-13 14:53:48 -07004216 struct urb *urb;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004217 int i;
4218
Paul Zimmerman0d012b92013-07-13 14:53:48 -07004219 if (!qtd) {
4220 dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
4221 return;
4222 }
4223
4224 if (!qtd->urb) {
4225 dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
4226 return;
4227 }
4228
4229 urb = qtd->urb->priv;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004230 if (!urb) {
Paul Zimmerman0d012b92013-07-13 14:53:48 -07004231 dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004232 return;
4233 }
4234
Paul Zimmerman0d012b92013-07-13 14:53:48 -07004235 urb->actual_length = dwc2_hcd_urb_get_actual_length(qtd->urb);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004236
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02004237 if (dbg_urb(urb))
4238 dev_vdbg(hsotg->dev,
4239 "%s: urb %p device %d ep %d-%s status %d actual %d\n",
4240 __func__, urb, usb_pipedevice(urb->pipe),
4241 usb_pipeendpoint(urb->pipe),
4242 usb_pipein(urb->pipe) ? "IN" : "OUT", status,
4243 urb->actual_length);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004244
Paul Zimmerman7359d482013-03-11 17:47:59 -07004245 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
Paul Zimmerman0d012b92013-07-13 14:53:48 -07004246 urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004247 for (i = 0; i < urb->number_of_packets; ++i) {
4248 urb->iso_frame_desc[i].actual_length =
4249 dwc2_hcd_urb_get_iso_desc_actual_length(
Paul Zimmerman0d012b92013-07-13 14:53:48 -07004250 qtd->urb, i);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004251 urb->iso_frame_desc[i].status =
Paul Zimmerman0d012b92013-07-13 14:53:48 -07004252 dwc2_hcd_urb_get_iso_desc_status(qtd->urb, i);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004253 }
4254 }
4255
Gregory Herrerofe9b1772015-09-22 15:16:51 +02004256 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS && dbg_perio()) {
4257 for (i = 0; i < urb->number_of_packets; i++)
4258 dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n",
4259 i, urb->iso_frame_desc[i].status);
4260 }
4261
Paul Zimmerman7359d482013-03-11 17:47:59 -07004262 urb->status = status;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004263 if (!status) {
4264 if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
4265 urb->actual_length < urb->transfer_buffer_length)
4266 urb->status = -EREMOTEIO;
4267 }
4268
4269 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
4270 usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
4271 struct usb_host_endpoint *ep = urb->ep;
4272
4273 if (ep)
4274 dwc2_free_bus_bandwidth(dwc2_hsotg_to_hcd(hsotg),
4275 dwc2_hcd_get_ep_bandwidth(hsotg, ep),
4276 urb);
4277 }
4278
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004279 usb_hcd_unlink_urb_from_ep(dwc2_hsotg_to_hcd(hsotg), urb);
Paul Zimmerman0d012b92013-07-13 14:53:48 -07004280 urb->hcpriv = NULL;
4281 kfree(qtd->urb);
4282 qtd->urb = NULL;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004283
Paul Zimmerman7359d482013-03-11 17:47:59 -07004284 usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004285}
4286
4287/*
4288 * Work queue function for starting the HCD when A-Cable is connected
4289 */
4290static void dwc2_hcd_start_func(struct work_struct *work)
4291{
4292 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
4293 start_work.work);
4294
4295 dev_dbg(hsotg->dev, "%s() %p\n", __func__, hsotg);
4296 dwc2_host_start(hsotg);
4297}
4298
4299/*
4300 * Reset work queue function
4301 */
4302static void dwc2_hcd_reset_func(struct work_struct *work)
4303{
4304 struct dwc2_hsotg *hsotg = container_of(work, struct dwc2_hsotg,
4305 reset_work.work);
Douglas Anderson4a065c72015-11-20 09:06:27 -08004306 unsigned long flags;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004307 u32 hprt0;
4308
4309 dev_dbg(hsotg->dev, "USB RESET function called\n");
Douglas Anderson4a065c72015-11-20 09:06:27 -08004310
4311 spin_lock_irqsave(&hsotg->lock, flags);
4312
Paul Zimmerman7359d482013-03-11 17:47:59 -07004313 hprt0 = dwc2_read_hprt0(hsotg);
4314 hprt0 &= ~HPRT0_RST;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03004315 dwc2_writel(hprt0, hsotg->regs + HPRT0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004316 hsotg->flags.b.port_reset_change = 1;
Douglas Anderson4a065c72015-11-20 09:06:27 -08004317
4318 spin_unlock_irqrestore(&hsotg->lock, flags);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004319}
4320
4321/*
4322 * =========================================================================
4323 * Linux HC Driver Functions
4324 * =========================================================================
4325 */
4326
4327/*
4328 * Initializes the DWC_otg controller and its root hub and prepares it for host
4329 * mode operation. Activates the root port. Returns 0 on success and a negative
4330 * error code on failure.
4331 */
4332static int _dwc2_hcd_start(struct usb_hcd *hcd)
4333{
4334 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4335 struct usb_bus *bus = hcd_to_bus(hcd);
4336 unsigned long flags;
4337
4338 dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
4339
4340 spin_lock_irqsave(&hsotg->lock, flags);
Gregory Herrero31927b62015-09-22 15:16:41 +02004341 hsotg->lx_state = DWC2_L0;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004342 hcd->state = HC_STATE_RUNNING;
Gregory Herrero31927b62015-09-22 15:16:41 +02004343 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004344
4345 if (dwc2_is_device_mode(hsotg)) {
4346 spin_unlock_irqrestore(&hsotg->lock, flags);
4347 return 0; /* why 0 ?? */
4348 }
4349
4350 dwc2_hcd_reinit(hsotg);
4351
4352 /* Initialize and connect root hub if one is not already attached */
4353 if (bus->root_hub) {
4354 dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n");
4355 /* Inform the HUB driver to resume */
4356 usb_hcd_resume_root_hub(hcd);
4357 }
4358
4359 spin_unlock_irqrestore(&hsotg->lock, flags);
4360 return 0;
4361}
4362
4363/*
4364 * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
4365 * stopped.
4366 */
4367static void _dwc2_hcd_stop(struct usb_hcd *hcd)
4368{
4369 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4370 unsigned long flags;
4371
Gregory Herrero5bbf6ce2015-09-22 15:16:48 +02004372 /* Turn off all host-specific interrupts */
4373 dwc2_disable_host_interrupts(hsotg);
4374
Gregory Herrero091473a2015-09-22 15:16:46 +02004375 /* Wait for interrupt processing to finish */
4376 synchronize_irq(hcd->irq);
4377
Paul Zimmerman7359d482013-03-11 17:47:59 -07004378 spin_lock_irqsave(&hsotg->lock, flags);
Gregory Herrero091473a2015-09-22 15:16:46 +02004379 /* Ensure hcd is disconnected */
Douglas Anderson6a659532015-11-19 13:23:14 -08004380 dwc2_hcd_disconnect(hsotg, true);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004381 dwc2_hcd_stop(hsotg);
Gregory Herrero31927b62015-09-22 15:16:41 +02004382 hsotg->lx_state = DWC2_L3;
4383 hcd->state = HC_STATE_HALT;
4384 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004385 spin_unlock_irqrestore(&hsotg->lock, flags);
4386
4387 usleep_range(1000, 3000);
4388}
4389
Gregory Herrero99a65792015-04-29 22:09:13 +02004390static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
4391{
4392 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02004393 unsigned long flags;
4394 int ret = 0;
4395 u32 hprt0;
Gregory Herrero99a65792015-04-29 22:09:13 +02004396
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02004397 spin_lock_irqsave(&hsotg->lock, flags);
4398
Meng Dongyangf367b722017-08-09 10:34:09 +08004399 if (dwc2_is_device_mode(hsotg))
4400 goto unlock;
4401
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02004402 if (hsotg->lx_state != DWC2_L0)
4403 goto unlock;
4404
4405 if (!HCD_HW_ACCESSIBLE(hcd))
4406 goto unlock;
4407
John Stultz866932e2017-01-09 13:10:24 -08004408 if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
4409 goto unlock;
4410
John Younbea8e862016-11-03 17:55:53 -07004411 if (!hsotg->params.hibernation)
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02004412 goto skip_power_saving;
4413
4414 /*
4415 * Drive USB suspend and disable port Power
4416 * if usb bus is not suspended.
4417 */
4418 if (!hsotg->bus_suspended) {
4419 hprt0 = dwc2_read_hprt0(hsotg);
4420 hprt0 |= HPRT0_SUSP;
4421 hprt0 &= ~HPRT0_PWR;
4422 dwc2_writel(hprt0, hsotg->regs + HPRT0);
4423 }
4424
4425 /* Enter hibernation */
4426 ret = dwc2_enter_hibernation(hsotg);
4427 if (ret) {
4428 if (ret != -ENOTSUPP)
4429 dev_err(hsotg->dev,
4430 "enter hibernation failed\n");
4431 goto skip_power_saving;
4432 }
4433
4434 /* Ask phy to be suspended */
4435 if (!IS_ERR_OR_NULL(hsotg->uphy)) {
4436 spin_unlock_irqrestore(&hsotg->lock, flags);
4437 usb_phy_set_suspend(hsotg->uphy, true);
4438 spin_lock_irqsave(&hsotg->lock, flags);
4439 }
4440
4441 /* After entering hibernation, hardware is no more accessible */
4442 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4443
4444skip_power_saving:
Gregory Herrero99a65792015-04-29 22:09:13 +02004445 hsotg->lx_state = DWC2_L2;
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02004446unlock:
4447 spin_unlock_irqrestore(&hsotg->lock, flags);
4448
4449 return ret;
Gregory Herrero99a65792015-04-29 22:09:13 +02004450}
4451
4452static int _dwc2_hcd_resume(struct usb_hcd *hcd)
4453{
4454 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02004455 unsigned long flags;
4456 int ret = 0;
4457
4458 spin_lock_irqsave(&hsotg->lock, flags);
4459
Meng Dongyangf367b722017-08-09 10:34:09 +08004460 if (dwc2_is_device_mode(hsotg))
4461 goto unlock;
4462
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02004463 if (hsotg->lx_state != DWC2_L2)
4464 goto unlock;
4465
John Younbea8e862016-11-03 17:55:53 -07004466 if (!hsotg->params.hibernation) {
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02004467 hsotg->lx_state = DWC2_L0;
4468 goto unlock;
4469 }
4470
4471 /*
4472 * Set HW accessible bit before powering on the controller
4473 * since an interrupt may rise.
4474 */
4475 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
4476
4477 /*
4478 * Enable power if not already done.
4479 * This must not be spinlocked since duration
4480 * of this call is unknown.
4481 */
4482 if (!IS_ERR_OR_NULL(hsotg->uphy)) {
4483 spin_unlock_irqrestore(&hsotg->lock, flags);
4484 usb_phy_set_suspend(hsotg->uphy, false);
4485 spin_lock_irqsave(&hsotg->lock, flags);
4486 }
4487
4488 /* Exit hibernation */
4489 ret = dwc2_exit_hibernation(hsotg, true);
4490 if (ret && (ret != -ENOTSUPP))
4491 dev_err(hsotg->dev, "exit hibernation failed\n");
Gregory Herrero99a65792015-04-29 22:09:13 +02004492
4493 hsotg->lx_state = DWC2_L0;
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02004494
4495 spin_unlock_irqrestore(&hsotg->lock, flags);
4496
4497 if (hsotg->bus_suspended) {
4498 spin_lock_irqsave(&hsotg->lock, flags);
4499 hsotg->flags.b.port_suspend_change = 1;
4500 spin_unlock_irqrestore(&hsotg->lock, flags);
4501 dwc2_port_resume(hsotg);
4502 } else {
Gregory Herrero5634e012015-09-22 15:16:50 +02004503 /* Wait for controller to correctly update D+/D- level */
4504 usleep_range(3000, 5000);
4505
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02004506 /*
4507 * Clear Port Enable and Port Status changes.
4508 * Enable Port Power.
4509 */
4510 dwc2_writel(HPRT0_PWR | HPRT0_CONNDET |
4511 HPRT0_ENACHG, hsotg->regs + HPRT0);
4512 /* Wait for controller to detect Port Connect */
Gregory Herrero5634e012015-09-22 15:16:50 +02004513 usleep_range(5000, 7000);
Gregory Herreroa2a23d3f2015-09-22 15:16:40 +02004514 }
4515
4516 return ret;
4517unlock:
4518 spin_unlock_irqrestore(&hsotg->lock, flags);
4519
4520 return ret;
Gregory Herrero99a65792015-04-29 22:09:13 +02004521}
4522
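/*
 * Illustrative sketch only (not part of the driver): a rough outline of how
 * the bus_suspend/bus_resume callbacks above are expected to be paired.  In
 * the real kernel they are invoked by the USB core's root-hub power
 * management (hcd_bus_suspend()/hcd_bus_resume()); the helper name and the
 * minimal error handling below are assumptions made for the example.
 */
#if 0
static int dwc2_example_bus_pm_cycle(struct usb_hcd *hcd)
{
	int ret;

	/* Park the bus: drives HPRT0 suspend and may enter hibernation */
	ret = _dwc2_hcd_suspend(hcd);
	if (ret)
		return ret;

	/* ... system stays asleep here ... */

	/* Wake the bus: exits hibernation and resumes the root port */
	return _dwc2_hcd_resume(hcd);
}
#endif
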
Paul Zimmerman7359d482013-03-11 17:47:59 -07004523/* Returns the current frame number */
4524static int _dwc2_hcd_get_frame_number(struct usb_hcd *hcd)
4525{
4526 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4527
4528 return dwc2_hcd_get_frame_number(hsotg);
4529}
4530
4531static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
4532 char *fn_name)
4533{
4534#ifdef VERBOSE_DEBUG
4535 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
Nicholas Mc Guireefe357f2017-01-12 17:33:26 +01004536 char *pipetype = NULL;
4537 char *speed = NULL;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004538
4539 dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
4540 dev_vdbg(hsotg->dev, " Device address: %d\n",
4541 usb_pipedevice(urb->pipe));
4542 dev_vdbg(hsotg->dev, " Endpoint: %d, %s\n",
4543 usb_pipeendpoint(urb->pipe),
4544 usb_pipein(urb->pipe) ? "IN" : "OUT");
4545
4546 switch (usb_pipetype(urb->pipe)) {
4547 case PIPE_CONTROL:
4548 pipetype = "CONTROL";
4549 break;
4550 case PIPE_BULK:
4551 pipetype = "BULK";
4552 break;
4553 case PIPE_INTERRUPT:
4554 pipetype = "INTERRUPT";
4555 break;
4556 case PIPE_ISOCHRONOUS:
4557 pipetype = "ISOCHRONOUS";
4558 break;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004559 }
4560
4561 dev_vdbg(hsotg->dev, " Endpoint type: %s %s (%s)\n", pipetype,
4562 usb_urb_dir_in(urb) ? "IN" : "OUT", usb_pipein(urb->pipe) ?
4563 "IN" : "OUT");
4564
4565 switch (urb->dev->speed) {
4566 case USB_SPEED_HIGH:
4567 speed = "HIGH";
4568 break;
4569 case USB_SPEED_FULL:
4570 speed = "FULL";
4571 break;
4572 case USB_SPEED_LOW:
4573 speed = "LOW";
4574 break;
4575 default:
4576 speed = "UNKNOWN";
4577 break;
4578 }
4579
4580 dev_vdbg(hsotg->dev, " Speed: %s\n", speed);
4581 dev_vdbg(hsotg->dev, " Max packet size: %d\n",
4582 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
4583 dev_vdbg(hsotg->dev, " Data buffer length: %d\n",
4584 urb->transfer_buffer_length);
Paul Zimmerman157dfaa2013-03-14 13:12:00 -07004585 dev_vdbg(hsotg->dev, " Transfer buffer: %p, Transfer DMA: %08lx\n",
4586 urb->transfer_buffer, (unsigned long)urb->transfer_dma);
4587 dev_vdbg(hsotg->dev, " Setup buffer: %p, Setup DMA: %08lx\n",
4588 urb->setup_packet, (unsigned long)urb->setup_dma);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004589 dev_vdbg(hsotg->dev, " Interval: %d\n", urb->interval);
4590
4591 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
4592 int i;
4593
4594 for (i = 0; i < urb->number_of_packets; i++) {
4595 dev_vdbg(hsotg->dev, " ISO Desc %d:\n", i);
4596 dev_vdbg(hsotg->dev, " offset: %d, length %d\n",
4597 urb->iso_frame_desc[i].offset,
4598 urb->iso_frame_desc[i].length);
4599 }
4600 }
4601#endif
4602}
4603
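/*
 * Note (assumption, for illustration only): the URB dump above compiles to
 * nothing unless VERBOSE_DEBUG is defined for this file, since both the
 * #ifdef guard and dev_vdbg() depend on it.  Something like the define
 * below, placed before the includes at the top of the file (or an
 * equivalent ccflags-y entry in the Makefile), would enable it.
 */
#if 0
#define VERBOSE_DEBUG	1	/* enables dwc2_dump_urb_info() and dev_vdbg() */
#endif
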
4604/*
4605 * Starts processing a USB transfer request specified by a USB Request Block
4606 * (URB). mem_flags indicates the type of memory allocation to use while
4607 * processing this URB.
4608 */
4609static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
4610 gfp_t mem_flags)
4611{
4612 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4613 struct usb_host_endpoint *ep = urb->ep;
4614 struct dwc2_hcd_urb *dwc2_urb;
4615 int i;
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004616 int retval;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004617 int alloc_bandwidth = 0;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004618 u8 ep_type = 0;
4619 u32 tflags = 0;
4620 void *buf;
4621 unsigned long flags;
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +02004622 struct dwc2_qh *qh;
4623 bool qh_allocated = false;
Mian Yousaf Kaukabb5a468a2015-06-29 11:05:29 +02004624 struct dwc2_qtd *qtd;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004625
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02004626 if (dbg_urb(urb)) {
4627 dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n");
4628 dwc2_dump_urb_info(hcd, urb, "urb_enqueue");
4629 }
Paul Zimmerman7359d482013-03-11 17:47:59 -07004630
John Youn9da51972017-01-17 20:30:27 -08004631 if (!ep)
Paul Zimmerman7359d482013-03-11 17:47:59 -07004632 return -EINVAL;
4633
4634 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
4635 usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
4636 spin_lock_irqsave(&hsotg->lock, flags);
4637 if (!dwc2_hcd_is_bandwidth_allocated(hsotg, ep))
4638 alloc_bandwidth = 1;
4639 spin_unlock_irqrestore(&hsotg->lock, flags);
4640 }
4641
4642 switch (usb_pipetype(urb->pipe)) {
4643 case PIPE_CONTROL:
4644 ep_type = USB_ENDPOINT_XFER_CONTROL;
4645 break;
4646 case PIPE_ISOCHRONOUS:
4647 ep_type = USB_ENDPOINT_XFER_ISOC;
4648 break;
4649 case PIPE_BULK:
4650 ep_type = USB_ENDPOINT_XFER_BULK;
4651 break;
4652 case PIPE_INTERRUPT:
4653 ep_type = USB_ENDPOINT_XFER_INT;
4654 break;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004655 }
4656
4657 dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
4658 mem_flags);
4659 if (!dwc2_urb)
4660 return -ENOMEM;
4661
4662 dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, usb_pipedevice(urb->pipe),
4663 usb_pipeendpoint(urb->pipe), ep_type,
4664 usb_pipein(urb->pipe),
4665 usb_maxpacket(urb->dev, urb->pipe,
4666 !(usb_pipein(urb->pipe))));
4667
4668 buf = urb->transfer_buffer;
Paul Zimmerman25a49442013-07-13 14:53:53 -07004669
Paul Zimmerman7359d482013-03-11 17:47:59 -07004670 if (hcd->self.uses_dma) {
Paul Zimmerman25a49442013-07-13 14:53:53 -07004671 if (!buf && (urb->transfer_dma & 3)) {
4672 dev_err(hsotg->dev,
4673 "%s: unaligned transfer with no transfer_buffer",
4674 __func__);
4675 retval = -EINVAL;
Gregory Herrero33ad2612015-04-29 22:09:15 +02004676 goto fail0;
Paul Zimmerman25a49442013-07-13 14:53:53 -07004677 }
Paul Zimmerman7359d482013-03-11 17:47:59 -07004678 }
4679
4680 if (!(urb->transfer_flags & URB_NO_INTERRUPT))
4681 tflags |= URB_GIVEBACK_ASAP;
4682 if (urb->transfer_flags & URB_ZERO_PACKET)
4683 tflags |= URB_SEND_ZERO_PACKET;
4684
4685 dwc2_urb->priv = urb;
4686 dwc2_urb->buf = buf;
4687 dwc2_urb->dma = urb->transfer_dma;
4688 dwc2_urb->length = urb->transfer_buffer_length;
4689 dwc2_urb->setup_packet = urb->setup_packet;
4690 dwc2_urb->setup_dma = urb->setup_dma;
4691 dwc2_urb->flags = tflags;
4692 dwc2_urb->interval = urb->interval;
4693 dwc2_urb->status = -EINPROGRESS;
4694
4695 for (i = 0; i < urb->number_of_packets; ++i)
4696 dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i,
4697 urb->iso_frame_desc[i].offset,
4698 urb->iso_frame_desc[i].length);
4699
4700 urb->hcpriv = dwc2_urb;
John Youn9da51972017-01-17 20:30:27 -08004701 qh = (struct dwc2_qh *)ep->hcpriv;
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +02004702 /* Create QH for the endpoint if it doesn't exist */
4703 if (!qh) {
4704 qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags);
4705 if (!qh) {
4706 retval = -ENOMEM;
4707 goto fail0;
4708 }
4709 ep->hcpriv = qh;
4710 qh_allocated = true;
4711 }
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004712
Mian Yousaf Kaukabb5a468a2015-06-29 11:05:29 +02004713 qtd = kzalloc(sizeof(*qtd), mem_flags);
4714 if (!qtd) {
4715 retval = -ENOMEM;
4716 goto fail1;
4717 }
4718
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004719 spin_lock_irqsave(&hsotg->lock, flags);
4720 retval = usb_hcd_link_urb_to_ep(hcd, urb);
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004721 if (retval)
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004722 goto fail2;
4723
Mian Yousaf Kaukabb5a468a2015-06-29 11:05:29 +02004724 retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
4725 if (retval)
4726 goto fail3;
4727
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004728 if (alloc_bandwidth) {
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004729 dwc2_allocate_bus_bandwidth(hcd,
4730 dwc2_hcd_get_ep_bandwidth(hsotg, ep),
4731 urb);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004732 }
4733
Gregory Herrero33ad2612015-04-29 22:09:15 +02004734 spin_unlock_irqrestore(&hsotg->lock, flags);
4735
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004736 return 0;
4737
Mian Yousaf Kaukabb5a468a2015-06-29 11:05:29 +02004738fail3:
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004739 dwc2_urb->priv = NULL;
4740 usb_hcd_unlink_urb_from_ep(hcd, urb);
Douglas Anderson16e80212016-01-28 18:19:55 -08004741 if (qh_allocated && qh->channel && qh->channel->qh == qh)
4742 qh->channel->qh = NULL;
Mian Yousaf Kaukabb5a468a2015-06-29 11:05:29 +02004743fail2:
Gregory Herrero33ad2612015-04-29 22:09:15 +02004744 spin_unlock_irqrestore(&hsotg->lock, flags);
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004745 urb->hcpriv = NULL;
Mian Yousaf Kaukabb5a468a2015-06-29 11:05:29 +02004746 kfree(qtd);
Vardan Mikayelyanb0d659022016-04-27 20:20:51 -07004747 qtd = NULL;
Mian Yousaf Kaukabb5a468a2015-06-29 11:05:29 +02004748fail1:
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +02004749 if (qh_allocated) {
4750 struct dwc2_qtd *qtd2, *qtd2_tmp;
4751
4752 ep->hcpriv = NULL;
4753 dwc2_hcd_qh_unlink(hsotg, qh);
4754 /* Free each QTD in the QH's QTD list */
4755 list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list,
John Youn9da51972017-01-17 20:30:27 -08004756 qtd_list_entry)
Mian Yousaf Kaukabb58e6ce2015-06-29 11:05:28 +02004757 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh);
4758 dwc2_hcd_qh_free(hsotg, qh);
4759 }
Gregory Herrero33ad2612015-04-29 22:09:15 +02004760fail0:
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004761 kfree(dwc2_urb);
4762
Paul Zimmerman7359d482013-03-11 17:47:59 -07004763 return retval;
4764}
4765
4766/*
4767 * Aborts/cancels a USB transfer request. Returns 0 on success, else a negative error code.
4768 */
4769static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
4770 int status)
4771{
4772 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004773 int rc;
Paul Zimmerman7359d482013-03-11 17:47:59 -07004774 unsigned long flags;
4775
4776 dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n");
4777 dwc2_dump_urb_info(hcd, urb, "urb_dequeue");
4778
4779 spin_lock_irqsave(&hsotg->lock, flags);
4780
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004781 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
4782 if (rc)
4783 goto out;
4784
Paul Zimmerman7359d482013-03-11 17:47:59 -07004785 if (!urb->hcpriv) {
4786 dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n");
4787 goto out;
4788 }
4789
4790 rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv);
4791
Paul Zimmermanc9e1c902013-07-13 14:53:49 -07004792 usb_hcd_unlink_urb_from_ep(hcd, urb);
4793
Paul Zimmerman7359d482013-03-11 17:47:59 -07004794 kfree(urb->hcpriv);
4795 urb->hcpriv = NULL;
4796
4797 /* Higher layer software sets URB status */
4798 spin_unlock(&hsotg->lock);
4799 usb_hcd_giveback_urb(hcd, urb, status);
4800 spin_lock(&hsotg->lock);
4801
4802 dev_dbg(hsotg->dev, "Called usb_hcd_giveback_urb()\n");
4803 dev_dbg(hsotg->dev, " urb->status = %d\n", urb->status);
4804out:
4805 spin_unlock_irqrestore(&hsotg->lock, flags);
4806
4807 return rc;
4808}
4809
4810/*
4811 * Frees resources in the DWC_otg controller related to a given endpoint. Also
4812 * clears state in the HCD related to the endpoint. Any URBs for the endpoint
4813 * must already be dequeued.
4814 */
4815static void _dwc2_hcd_endpoint_disable(struct usb_hcd *hcd,
4816 struct usb_host_endpoint *ep)
4817{
4818 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4819
4820 dev_dbg(hsotg->dev,
4821 "DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, ep->hcpriv=%p\n",
4822 ep->desc.bEndpointAddress, ep->hcpriv);
4823 dwc2_hcd_endpoint_disable(hsotg, ep, 250);
4824}
4825
4826/*
4827 * Resets endpoint specific parameter values, in current version used to reset
4828 * Resets endpoint-specific parameter values. In the current version this is
4829 * used to reset the data toggle (as a workaround). This function can be
4830 * called from the usb_clear_halt() routine.
4831static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
4832 struct usb_host_endpoint *ep)
4833{
4834 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004835 unsigned long flags;
4836
4837 dev_dbg(hsotg->dev,
4838 "DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
4839 ep->desc.bEndpointAddress);
4840
Paul Zimmerman7359d482013-03-11 17:47:59 -07004841 spin_lock_irqsave(&hsotg->lock, flags);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004842 dwc2_hcd_endpoint_reset(hsotg, ep);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004843 spin_unlock_irqrestore(&hsotg->lock, flags);
4844}
4845
4846/*
4847 * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
4848 * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
4849 * interrupt.
4850 *
4851 * This function is called by the USB core when an interrupt occurs
4852 */
4853static irqreturn_t _dwc2_hcd_irq(struct usb_hcd *hcd)
4854{
4855 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004856
Matthijs Kooijmanca18f4a2013-04-25 23:39:15 +02004857 return dwc2_handle_hcd_intr(hsotg);
Paul Zimmerman7359d482013-03-11 17:47:59 -07004858}
4859
4860/*
4861 * Creates Status Change bitmap for the root hub and root port. The bitmap is
4862 * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
4863 * is the status change indicator for the single root port. Returns 1 if either
4864 * change indicator is 1, otherwise returns 0.
4865 */
4866static int _dwc2_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
4867{
4868 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4869
4870 buf[0] = dwc2_hcd_is_status_changed(hsotg, 1) << 1;
4871 return buf[0] != 0;
4872}
4873
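/*
 * Illustrative sketch only (not driver code): how a caller could interpret
 * the bitmap filled in by _dwc2_hcd_hub_status_data() above.  Bit 0 is the
 * root hub's own change indicator (never set here) and bit 1 corresponds to
 * the single root port, matching the "<< 1" shift in the function.
 */
#if 0
static bool dwc2_example_root_port_changed(const char *buf)
{
	/* buf[0] as returned by _dwc2_hcd_hub_status_data() */
	return (buf[0] & BIT(1)) != 0;
}
#endif
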
4874/* Handles hub class-specific requests */
4875static int _dwc2_hcd_hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue,
4876 u16 windex, char *buf, u16 wlength)
4877{
4878 int retval = dwc2_hcd_hub_control(dwc2_hcd_to_hsotg(hcd), typereq,
4879 wvalue, windex, buf, wlength);
4880 return retval;
4881}
4882
4883/* Handles hub TT buffer clear completions */
4884static void _dwc2_hcd_clear_tt_buffer_complete(struct usb_hcd *hcd,
4885 struct usb_host_endpoint *ep)
4886{
4887 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4888 struct dwc2_qh *qh;
4889 unsigned long flags;
4890
4891 qh = ep->hcpriv;
4892 if (!qh)
4893 return;
4894
4895 spin_lock_irqsave(&hsotg->lock, flags);
4896 qh->tt_buffer_dirty = 0;
4897
4898 if (hsotg->flags.b.port_connect_status)
4899 dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_ALL);
4900
4901 spin_unlock_irqrestore(&hsotg->lock, flags);
4902}
4903
Chen Yuca8b0332017-01-23 15:00:18 -08004904/*
4905 * HPRT0_SPD_HIGH_SPEED: high speed
4906 * HPRT0_SPD_FULL_SPEED: full speed
4907 */
4908static void dwc2_change_bus_speed(struct usb_hcd *hcd, int speed)
4909{
4910 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4911
4912 if (hsotg->params.speed == speed)
4913 return;
4914
4915 hsotg->params.speed = speed;
4916 queue_work(hsotg->wq_otg, &hsotg->wf_otg);
4917}
4918
4919static void dwc2_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
4920{
4921 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4922
4923 if (!hsotg->params.change_speed_quirk)
4924 return;
4925
4926 /*
4927 * On removal, set speed to default high-speed.
4928 */
4929 if (udev->parent && udev->parent->speed > USB_SPEED_UNKNOWN &&
4930 udev->parent->speed < USB_SPEED_HIGH) {
4931 dev_info(hsotg->dev, "Set speed to default high-speed\n");
4932 dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED);
4933 }
4934}
4935
4936static int dwc2_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
4937{
4938 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4939
4940 if (!hsotg->params.change_speed_quirk)
4941 return 0;
4942
4943 if (udev->speed == USB_SPEED_HIGH) {
4944 dev_info(hsotg->dev, "Set speed to high-speed\n");
4945 dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED);
4946 } else if ((udev->speed == USB_SPEED_FULL ||
4947 udev->speed == USB_SPEED_LOW)) {
4948 /*
4949 * Change speed setting to full-speed if there's
4950 * a full-speed or low-speed device plugged in.
4951 */
4952 dev_info(hsotg->dev, "Set speed to full-speed\n");
4953 dwc2_change_bus_speed(hcd, HPRT0_SPD_FULL_SPEED);
4954 }
4955
4956 return 0;
4957}
4958
Paul Zimmerman7359d482013-03-11 17:47:59 -07004959static struct hc_driver dwc2_hc_driver = {
4960 .description = "dwc2_hsotg",
4961 .product_desc = "DWC OTG Controller",
4962 .hcd_priv_size = sizeof(struct wrapper_priv_data),
4963
4964 .irq = _dwc2_hcd_irq,
Douglas Anderson8add17c2016-01-28 18:20:00 -08004965 .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
Paul Zimmerman7359d482013-03-11 17:47:59 -07004966
4967 .start = _dwc2_hcd_start,
4968 .stop = _dwc2_hcd_stop,
4969 .urb_enqueue = _dwc2_hcd_urb_enqueue,
4970 .urb_dequeue = _dwc2_hcd_urb_dequeue,
4971 .endpoint_disable = _dwc2_hcd_endpoint_disable,
4972 .endpoint_reset = _dwc2_hcd_endpoint_reset,
4973 .get_frame_number = _dwc2_hcd_get_frame_number,
4974
4975 .hub_status_data = _dwc2_hcd_hub_status_data,
4976 .hub_control = _dwc2_hcd_hub_control,
4977 .clear_tt_buffer_complete = _dwc2_hcd_clear_tt_buffer_complete,
Gregory Herrero99a65792015-04-29 22:09:13 +02004978
4979 .bus_suspend = _dwc2_hcd_suspend,
4980 .bus_resume = _dwc2_hcd_resume,
Douglas Anderson3bc04e22016-01-28 18:19:53 -08004981
4982 .map_urb_for_dma = dwc2_map_urb_for_dma,
4983 .unmap_urb_for_dma = dwc2_unmap_urb_for_dma,
Paul Zimmerman7359d482013-03-11 17:47:59 -07004984};
4985
4986/*
4987 * Frees secondary storage associated with the dwc2_hsotg structure contained
4988 * in the struct usb_hcd field
4989 */
4990static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
4991{
4992 u32 ahbcfg;
4993 u32 dctl;
4994 int i;
4995
4996 dev_dbg(hsotg->dev, "DWC OTG HCD FREE\n");
4997
4998 /* Free memory for QH/QTD lists */
4999 dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_inactive);
Douglas Anderson38d2b5f2017-12-12 10:30:31 -08005000 dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_waiting);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005001 dwc2_qh_list_free(hsotg, &hsotg->non_periodic_sched_active);
5002 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_inactive);
5003 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_ready);
5004 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_assigned);
5005 dwc2_qh_list_free(hsotg, &hsotg->periodic_sched_queued);
5006
5007 /* Free memory for the host channels */
5008 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
5009 struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i];
5010
John Youn9da51972017-01-17 20:30:27 -08005011 if (chan) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07005012 dev_dbg(hsotg->dev, "HCD Free channel #%i, chan=%p\n",
5013 i, chan);
5014 hsotg->hc_ptr_array[i] = NULL;
5015 kfree(chan);
5016 }
5017 }
5018
John Youn95832c02017-01-23 14:57:26 -08005019 if (hsotg->params.host_dma) {
Paul Zimmerman7359d482013-03-11 17:47:59 -07005020 if (hsotg->status_buf) {
5021 dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE,
5022 hsotg->status_buf,
5023 hsotg->status_buf_dma);
5024 hsotg->status_buf = NULL;
5025 }
5026 } else {
5027 kfree(hsotg->status_buf);
5028 hsotg->status_buf = NULL;
5029 }
5030
Antti Seppälä95c8bc32015-08-20 21:41:07 +03005031 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005032
5033 /* Disable all interrupts */
5034 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03005035 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
5036 dwc2_writel(0, hsotg->regs + GINTMSK);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005037
Matthijs Kooijman9badec22013-08-30 18:45:21 +02005038 if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03005039 dctl = dwc2_readl(hsotg->regs + DCTL);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005040 dctl |= DCTL_SFTDISCON;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03005041 dwc2_writel(dctl, hsotg->regs + DCTL);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005042 }
5043
5044 if (hsotg->wq_otg) {
5045 if (!cancel_work_sync(&hsotg->wf_otg))
5046 flush_workqueue(hsotg->wq_otg);
5047 destroy_workqueue(hsotg->wq_otg);
5048 }
5049
Paul Zimmerman7359d482013-03-11 17:47:59 -07005050 del_timer(&hsotg->wkp_timer);
5051}
5052
5053static void dwc2_hcd_release(struct dwc2_hsotg *hsotg)
5054{
5055 /* Turn off all host-specific interrupts */
5056 dwc2_disable_host_interrupts(hsotg);
5057
5058 dwc2_hcd_free(hsotg);
5059}
5060
Matthijs Kooijman8284f932013-04-11 18:43:47 +02005061/*
Paul Zimmerman7359d482013-03-11 17:47:59 -07005062 * Initializes the HCD. This function allocates memory for and initializes the
5063 * static parts of the usb_hcd and dwc2_hsotg structures. It also registers the
5064 * USB bus with the core and calls the hc_driver->start() function. It returns
5065 * a negative error on failure.
5066 */
Heiner Kallweit4fe160d2017-01-25 23:13:37 +01005067int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
Paul Zimmerman7359d482013-03-11 17:47:59 -07005068{
Heiner Kallweit348becd2017-01-25 23:10:51 +01005069 struct platform_device *pdev = to_platform_device(hsotg->dev);
5070 struct resource *res;
Paul Zimmerman7359d482013-03-11 17:47:59 -07005071 struct usb_hcd *hcd;
5072 struct dwc2_host_chan *channel;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02005073 u32 hcfg;
Paul Zimmerman7359d482013-03-11 17:47:59 -07005074 int i, num_channels;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02005075 int retval;
Paul Zimmerman7359d482013-03-11 17:47:59 -07005076
Dinh Nguyenf5500ec2014-11-11 11:13:39 -06005077 if (usb_disabled())
5078 return -ENODEV;
5079
Paul Zimmermane62662c2013-03-25 17:03:35 -07005080 dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n");
Paul Zimmerman7359d482013-03-11 17:47:59 -07005081
Matthijs Kooijman9badec22013-08-30 18:45:21 +02005082 retval = -ENOMEM;
Paul Zimmerman7359d482013-03-11 17:47:59 -07005083
Antti Seppälä95c8bc32015-08-20 21:41:07 +03005084 hcfg = dwc2_readl(hsotg->regs + HCFG);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005085 dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005086
5087#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
5088 hsotg->frame_num_array = kzalloc(sizeof(*hsotg->frame_num_array) *
5089 FRAME_NUM_ARRAY_SIZE, GFP_KERNEL);
5090 if (!hsotg->frame_num_array)
Paul Zimmermanba0e60d2013-03-25 17:03:36 -07005091 goto error1;
Paul Zimmerman7359d482013-03-11 17:47:59 -07005092 hsotg->last_frame_num_array = kzalloc(
5093 sizeof(*hsotg->last_frame_num_array) *
5094 FRAME_NUM_ARRAY_SIZE, GFP_KERNEL);
5095 if (!hsotg->last_frame_num_array)
Paul Zimmermanba0e60d2013-03-25 17:03:36 -07005096 goto error1;
Paul Zimmerman7359d482013-03-11 17:47:59 -07005097#endif
Douglas Anderson483bb252016-01-28 18:20:07 -08005098 hsotg->last_frame_num = HFNUM_MAX_FRNUM;
Paul Zimmerman7359d482013-03-11 17:47:59 -07005099
Matthijs Kooijmana0112f42013-07-19 11:34:22 +02005100 /* Check if the bus driver or platform code has setup a dma_mask */
John Youn95832c02017-01-23 14:57:26 -08005101 if (hsotg->params.host_dma &&
John Youn9da51972017-01-17 20:30:27 -08005102 !hsotg->dev->dma_mask) {
Matthijs Kooijmana0112f42013-07-19 11:34:22 +02005103 dev_warn(hsotg->dev,
5104 "dma_mask not set, disabling DMA\n");
Nicholas Mc Guirefdb09b32017-01-12 16:55:02 +01005105 hsotg->params.host_dma = false;
John Youn95832c02017-01-23 14:57:26 -08005106 hsotg->params.dma_desc_enable = false;
Matthijs Kooijmana0112f42013-07-19 11:34:22 +02005107 }
5108
Paul Zimmermanba0e60d2013-03-25 17:03:36 -07005109 /* Set device flags indicating whether the HCD supports DMA */
John Youn95832c02017-01-23 14:57:26 -08005110 if (hsotg->params.host_dma) {
Paul Zimmerman30885312013-05-24 16:27:56 -07005111 if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
5112 dev_warn(hsotg->dev, "can't set DMA mask\n");
Paul Zimmerman25a49442013-07-13 14:53:53 -07005113 if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
5114 dev_warn(hsotg->dev, "can't set coherent DMA mask\n");
Paul Zimmermanba0e60d2013-03-25 17:03:36 -07005115 }
5116
Chen Yuca8b0332017-01-23 15:00:18 -08005117 if (hsotg->params.change_speed_quirk) {
5118 dwc2_hc_driver.free_dev = dwc2_free_dev;
5119 dwc2_hc_driver.reset_device = dwc2_reset_device;
5120 }
5121
Paul Zimmermanba0e60d2013-03-25 17:03:36 -07005122 hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev));
5123 if (!hcd)
5124 goto error1;
5125
John Youn95832c02017-01-23 14:57:26 -08005126 if (!hsotg->params.host_dma)
Matthijs Kooijman7de76ee2013-07-19 11:34:23 +02005127 hcd->self.uses_dma = 0;
5128
Paul Zimmermanba0e60d2013-03-25 17:03:36 -07005129 hcd->has_tt = 1;
5130
Heiner Kallweit348becd2017-01-25 23:10:51 +01005131 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5132 hcd->rsrc_start = res->start;
5133 hcd->rsrc_len = resource_size(res);
5134
John Youn9da51972017-01-17 20:30:27 -08005135 ((struct wrapper_priv_data *)&hcd->hcd_priv)->hsotg = hsotg;
Paul Zimmermanba0e60d2013-03-25 17:03:36 -07005136 hsotg->priv = hcd;
5137
Paul Zimmerman7359d482013-03-11 17:47:59 -07005138 /*
5139 * Disable the global interrupt until all the interrupt handlers are
5140 * installed
5141 */
5142 dwc2_disable_global_interrupts(hsotg);
5143
Matthijs Kooijman6706c722013-04-11 17:52:41 +02005144 /* Initialize the DWC_otg core, and select the Phy type */
Douglas Anderson0fe239b2015-12-17 11:14:40 -08005145 retval = dwc2_core_init(hsotg, true);
Matthijs Kooijman6706c722013-04-11 17:52:41 +02005146 if (retval)
5147 goto error2;
5148
Paul Zimmerman7359d482013-03-11 17:47:59 -07005149 /* Create new workqueue and init work */
Wei Yongjun53510352013-04-12 22:41:48 +08005150 retval = -ENOMEM;
Bhaktipriya Shridharec7b1262016-07-28 13:57:29 +05305151 hsotg->wq_otg = alloc_ordered_workqueue("dwc2", 0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005152 if (!hsotg->wq_otg) {
5153 dev_err(hsotg->dev, "Failed to create workqueue\n");
5154 goto error2;
5155 }
5156 INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
5157
Kees Cooke99e88a2017-10-16 14:43:17 -07005158 timer_setup(&hsotg->wkp_timer, dwc2_wakeup_detected, 0);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005159
5160 /* Initialize the non-periodic schedule */
5161 INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
Douglas Anderson38d2b5f2017-12-12 10:30:31 -08005162 INIT_LIST_HEAD(&hsotg->non_periodic_sched_waiting);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005163 INIT_LIST_HEAD(&hsotg->non_periodic_sched_active);
5164
5165 /* Initialize the periodic schedule */
5166 INIT_LIST_HEAD(&hsotg->periodic_sched_inactive);
5167 INIT_LIST_HEAD(&hsotg->periodic_sched_ready);
5168 INIT_LIST_HEAD(&hsotg->periodic_sched_assigned);
5169 INIT_LIST_HEAD(&hsotg->periodic_sched_queued);
5170
Douglas Andersonc9c8ac02016-01-28 18:19:57 -08005171 INIT_LIST_HEAD(&hsotg->split_order);
5172
Paul Zimmerman7359d482013-03-11 17:47:59 -07005173 /*
5174 * Create a host channel descriptor for each host channel implemented
5175 * in the controller. Initialize the channel descriptor array.
5176 */
5177 INIT_LIST_HEAD(&hsotg->free_hc_list);
John Younbea8e862016-11-03 17:55:53 -07005178 num_channels = hsotg->params.host_channels;
Paul Zimmerman7359d482013-03-11 17:47:59 -07005179 memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array));
5180
5181 for (i = 0; i < num_channels; i++) {
5182 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
John Youn9da51972017-01-17 20:30:27 -08005183 if (!channel)
Paul Zimmerman7359d482013-03-11 17:47:59 -07005184 goto error3;
5185 channel->hc_num = i;
Douglas Andersonc9c8ac02016-01-28 18:19:57 -08005186 INIT_LIST_HEAD(&channel->split_order_list_entry);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005187 hsotg->hc_ptr_array[i] = channel;
5188 }
5189
5190 /* Initialize hsotg start work */
5191 INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func);
5192
5193 /* Initialize port reset work */
5194 INIT_DELAYED_WORK(&hsotg->reset_work, dwc2_hcd_reset_func);
5195
5196 /*
5197 * Allocate space for storing data on status transactions. Normally no
5198 * data is sent, but this space acts as a bit bucket. This must be
5199 * done after usb_add_hcd since that function allocates the DMA buffer
5200 * pool.
5201 */
John Youn95832c02017-01-23 14:57:26 -08005202 if (hsotg->params.host_dma)
Paul Zimmerman7359d482013-03-11 17:47:59 -07005203 hsotg->status_buf = dma_alloc_coherent(hsotg->dev,
5204 DWC2_HCD_STATUS_BUF_SIZE,
5205 &hsotg->status_buf_dma, GFP_KERNEL);
5206 else
5207 hsotg->status_buf = kzalloc(DWC2_HCD_STATUS_BUF_SIZE,
5208 GFP_KERNEL);
5209
5210 if (!hsotg->status_buf)
5211 goto error3;
5212
Gregory Herrero3b5fcc92015-11-20 11:49:31 +01005213 /*
5214 * Create kmem caches to handle descriptor buffers in descriptor
5215 * DMA mode.
5216 * Alignment must be set to 512 bytes.
5217 */
John Younbea8e862016-11-03 17:55:53 -07005218 if (hsotg->params.dma_desc_enable ||
5219 hsotg->params.dma_desc_fs_enable) {
Gregory Herrero3b5fcc92015-11-20 11:49:31 +01005220 hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc",
Vahram Aharonyanec703252016-11-09 19:27:43 -08005221 sizeof(struct dwc2_dma_desc) *
Gregory Herrero3b5fcc92015-11-20 11:49:31 +01005222 MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA,
5223 NULL);
5224 if (!hsotg->desc_gen_cache) {
5225 dev_err(hsotg->dev,
5226 "unable to create dwc2 generic desc cache\n");
5227
5228 /*
5229 * Disable descriptor dma mode since it will not be
5230 * usable.
5231 */
John Youn95832c02017-01-23 14:57:26 -08005232 hsotg->params.dma_desc_enable = false;
5233 hsotg->params.dma_desc_fs_enable = false;
Gregory Herrero3b5fcc92015-11-20 11:49:31 +01005234 }
5235
5236 hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc",
Vahram Aharonyanec703252016-11-09 19:27:43 -08005237 sizeof(struct dwc2_dma_desc) *
Gregory Herrero3b5fcc92015-11-20 11:49:31 +01005238 MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL);
5239 if (!hsotg->desc_hsisoc_cache) {
5240 dev_err(hsotg->dev,
5241 "unable to create dwc2 hs isoc desc cache\n");
5242
5243 kmem_cache_destroy(hsotg->desc_gen_cache);
5244
5245 /*
5246 * Disable descriptor dma mode since it will not be
5247 * usable.
5248 */
John Youn95832c02017-01-23 14:57:26 -08005249 hsotg->params.dma_desc_enable = false;
5250 hsotg->params.dma_desc_fs_enable = false;
Gregory Herrero3b5fcc92015-11-20 11:49:31 +01005251 }
5252 }
5253
Paul Zimmerman7359d482013-03-11 17:47:59 -07005254 hsotg->otg_port = 1;
5255 hsotg->frame_list = NULL;
5256 hsotg->frame_list_dma = 0;
5257 hsotg->periodic_qh_count = 0;
5258
5259 /* Initiate lx_state to L3 disconnected state */
5260 hsotg->lx_state = DWC2_L3;
5261
5262 hcd->self.otg_port = hsotg->otg_port;
5263
5264 /* Don't support SG list at this point */
5265 hcd->self.sg_tablesize = 0;
5266
Mian Yousaf Kaukab9df4cea2015-04-29 22:09:12 +02005267 if (!IS_ERR_OR_NULL(hsotg->uphy))
5268 otg_set_host(hsotg->uphy->otg, &hcd->self);
5269
Paul Zimmerman7359d482013-03-11 17:47:59 -07005270 /*
5271 * Finish generic HCD initialization and start the HCD. This function
5272 * allocates the DMA buffer pool, registers the USB bus, requests the
5273 * IRQ line, and calls hcd_start method.
5274 */
Heiner Kallweit4fe160d2017-01-25 23:13:37 +01005275 retval = usb_add_hcd(hcd, hsotg->irq, IRQF_SHARED);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005276 if (retval < 0)
Gregory Herrero3b5fcc92015-11-20 11:49:31 +01005277 goto error4;
Paul Zimmerman7359d482013-03-11 17:47:59 -07005278
Peter Chen3c9740a2013-11-05 10:46:02 +08005279 device_wakeup_enable(hcd->self.controller);
5280
Paul Zimmerman7359d482013-03-11 17:47:59 -07005281 dwc2_hcd_dump_state(hsotg);
5282
5283 dwc2_enable_global_interrupts(hsotg);
5284
5285 return 0;
5286
Gregory Herrero3b5fcc92015-11-20 11:49:31 +01005287error4:
5288 kmem_cache_destroy(hsotg->desc_gen_cache);
5289 kmem_cache_destroy(hsotg->desc_hsisoc_cache);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005290error3:
5291 dwc2_hcd_release(hsotg);
5292error2:
Paul Zimmermanba0e60d2013-03-25 17:03:36 -07005293 usb_put_hcd(hcd);
5294error1:
Paul Zimmerman7359d482013-03-11 17:47:59 -07005295
5296#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
5297 kfree(hsotg->last_frame_num_array);
5298 kfree(hsotg->frame_num_array);
5299#endif
5300
Paul Zimmermane62662c2013-03-25 17:03:35 -07005301 dev_err(hsotg->dev, "%s() FAILED, returning %d\n", __func__, retval);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005302 return retval;
5303}
Paul Zimmerman7359d482013-03-11 17:47:59 -07005304
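/*
 * Illustrative sketch only (assumptions, not driver code): the rough shape
 * of a probe path that calls dwc2_hcd_init().  In the real driver this
 * happens in the platform glue (dwc2_driver_probe() in platform.c) once the
 * register resource and IRQ are known; the helper name and error handling
 * below are placeholders for the example.
 */
#if 0
static int dwc2_example_probe_host(struct dwc2_hsotg *hsotg)
{
	int ret;

	/* Core init, PHY selection and parameter setup happen earlier */
	ret = dwc2_hcd_init(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "HCD init failed: %d\n", ret);
		return ret;
	}

	/* dwc2_hcd_remove(hsotg) is the counterpart on driver removal */
	return 0;
}
#endif
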
5305/*
5306 * Removes the HCD.
5307 * Frees memory and resources associated with the HCD and deregisters the bus.
5308 */
Paul Zimmermane62662c2013-03-25 17:03:35 -07005309void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
Paul Zimmerman7359d482013-03-11 17:47:59 -07005310{
5311 struct usb_hcd *hcd;
5312
Paul Zimmermane62662c2013-03-25 17:03:35 -07005313 dev_dbg(hsotg->dev, "DWC OTG HCD REMOVE\n");
Paul Zimmerman7359d482013-03-11 17:47:59 -07005314
5315 hcd = dwc2_hsotg_to_hcd(hsotg);
Paul Zimmermane62662c2013-03-25 17:03:35 -07005316 dev_dbg(hsotg->dev, "hsotg->hcd = %p\n", hcd);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005317
5318 if (!hcd) {
Paul Zimmermane62662c2013-03-25 17:03:35 -07005319 dev_dbg(hsotg->dev, "%s: dwc2_hsotg_to_hcd(hsotg) NULL!\n",
Paul Zimmerman7359d482013-03-11 17:47:59 -07005320 __func__);
5321 return;
5322 }
5323
Mian Yousaf Kaukab9df4cea2015-04-29 22:09:12 +02005324 if (!IS_ERR_OR_NULL(hsotg->uphy))
5325 otg_set_host(hsotg->uphy->otg, NULL);
5326
Paul Zimmerman7359d482013-03-11 17:47:59 -07005327 usb_remove_hcd(hcd);
5328 hsotg->priv = NULL;
Gregory Herrero3b5fcc92015-11-20 11:49:31 +01005329
5330 kmem_cache_destroy(hsotg->desc_gen_cache);
5331 kmem_cache_destroy(hsotg->desc_hsisoc_cache);
5332
Paul Zimmerman7359d482013-03-11 17:47:59 -07005333 dwc2_hcd_release(hsotg);
Paul Zimmermanba0e60d2013-03-25 17:03:36 -07005334 usb_put_hcd(hcd);
Paul Zimmerman7359d482013-03-11 17:47:59 -07005335
5336#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
5337 kfree(hsotg->last_frame_num_array);
5338 kfree(hsotg->frame_num_array);
5339#endif
Paul Zimmerman7359d482013-03-11 17:47:59 -07005340}
John Youn58e52ff6a2016-02-23 19:54:57 -08005341
5342/**
5343 * dwc2_backup_host_registers() - Backup controller host registers.
5344 * When suspending the USB bus, the registers need to be backed up
5345 * if controller power is disabled once suspended.
5346 *
5347 * @hsotg: Programming view of the DWC_otg controller
5348 */
5349int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
5350{
5351 struct dwc2_hregs_backup *hr;
5352 int i;
5353
5354 dev_dbg(hsotg->dev, "%s\n", __func__);
5355
5356 /* Backup Host regs */
5357 hr = &hsotg->hr_backup;
5358 hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
5359 hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
John Younbea8e862016-11-03 17:55:53 -07005360 for (i = 0; i < hsotg->params.host_channels; ++i)
John Youn58e52ff6a2016-02-23 19:54:57 -08005361 hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
5362
5363 hr->hprt0 = dwc2_read_hprt0(hsotg);
5364 hr->hfir = dwc2_readl(hsotg->regs + HFIR);
5365 hr->valid = true;
5366
5367 return 0;
5368}
5369
5370/**
5371 * dwc2_restore_host_registers() - Restore controller host registers.
5372 * When resuming the USB bus, the device registers need to be restored
5373 * if controller power was disabled.
5374 *
5375 * @hsotg: Programming view of the DWC_otg controller
5376 */
5377int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
5378{
5379 struct dwc2_hregs_backup *hr;
5380 int i;
5381
5382 dev_dbg(hsotg->dev, "%s\n", __func__);
5383
5384 /* Restore host regs */
5385 hr = &hsotg->hr_backup;
5386 if (!hr->valid) {
5387 dev_err(hsotg->dev, "%s: no host registers to restore\n",
5388 __func__);
5389 return -EINVAL;
5390 }
5391 hr->valid = false;
5392
5393 dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
5394 dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);
5395
John Younbea8e862016-11-03 17:55:53 -07005396 for (i = 0; i < hsotg->params.host_channels; ++i)
John Youn58e52ff6a2016-02-23 19:54:57 -08005397 dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
5398
5399 dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
5400 dwc2_writel(hr->hfir, hsotg->regs + HFIR);
5401 hsotg->frame_number = 0;
5402
5403 return 0;
5404}
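
/*
 * Illustrative sketch only (assumption, not driver code): the intended
 * pairing of the two helpers above around a host power-down.  In the real
 * driver they are invoked from the register backup/restore paths used when
 * entering and leaving hibernation; the helper below only shows the
 * ordering.
 */
#if 0
static int dwc2_example_host_power_cycle(struct dwc2_hsotg *hsotg)
{
	int ret;

	/* Save HCFG, HAINTMSK, per-channel HCINTMSK, HPRT0 and HFIR */
	ret = dwc2_backup_host_registers(hsotg);
	if (ret)
		return ret;

	/* ... controller power is removed and later reapplied ... */

	/* Write the saved values back before the host schedule restarts */
	return dwc2_restore_host_registers(hsotg);
}
#endif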