1/*
2 * core.c - DesignWare HS OTG Controller common routines
3 *
4 * Copyright (C) 2004-2013 Synopsys, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The names of the above-listed copyright holders may not be used
16 * to endorse or promote products derived from this software without
17 * specific prior written permission.
18 *
19 * ALTERNATIVELY, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") as published by the Free Software
21 * Foundation; either version 2 of the License, or (at your option) any
22 * later version.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36
37/*
38 * The Core code provides basic services for accessing and managing the
39 * DWC_otg hardware. These services are used by both the Host Controller
40 * Driver and the Peripheral Controller Driver.
41 */
42#include <linux/kernel.h>
43#include <linux/module.h>
44#include <linux/moduleparam.h>
45#include <linux/spinlock.h>
46#include <linux/interrupt.h>
47#include <linux/dma-mapping.h>
48#include <linux/delay.h>
49#include <linux/io.h>
50#include <linux/slab.h>
51#include <linux/usb.h>
52
53#include <linux/usb/hcd.h>
54#include <linux/usb/ch11.h>
55
56#include "core.h"
57#include "hcd.h"
58
59/**
60 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
61 * used in both device and host modes
62 *
63 * @hsotg: Programming view of the DWC_otg controller
64 */
65static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
66{
67 u32 intmsk;
68
69 /* Clear any pending OTG Interrupts */
70 writel(0xffffffff, hsotg->regs + GOTGINT);
71
72 /* Clear any pending interrupts */
73 writel(0xffffffff, hsotg->regs + GINTSTS);
74
75 /* Enable the interrupts in the GINTMSK */
76 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
77
78 if (hsotg->core_params->dma_enable <= 0)
79 intmsk |= GINTSTS_RXFLVL;
80
81 intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
82 GINTSTS_SESSREQINT;
83
84 writel(intmsk, hsotg->regs + GINTMSK);
85}
86
87/*
88 * Initializes the FSLSPClkSel field of the HCFG register depending on the
89 * PHY type
90 */
91static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
92{
93 u32 hs_phy_type = hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK;
94 u32 fs_phy_type = hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK;
95 u32 hcfg, val;
96
97 if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
98 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
99 hsotg->core_params->ulpi_fs_ls > 0) ||
100 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
101 /* Full speed PHY */
102 val = HCFG_FSLSPCLKSEL_48_MHZ;
103 } else {
104 /* High speed PHY running at full speed or high speed */
105 val = HCFG_FSLSPCLKSEL_30_60_MHZ;
106 }
107
108 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
109 hcfg = readl(hsotg->regs + HCFG);
110 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
111 hcfg |= val;
112 writel(hcfg, hsotg->regs + HCFG);
113}
114
115/*
116 * Do a soft reset of the core. Be careful with this because it
117 * resets all the internal state machines of the core.
118 */
119static void dwc2_core_reset(struct dwc2_hsotg *hsotg)
120{
121 u32 greset;
122 int count = 0;
123
124 dev_vdbg(hsotg->dev, "%s()\n", __func__);
125
126 /* Wait for AHB master IDLE state */
127 do {
128 usleep_range(20000, 40000);
129 greset = readl(hsotg->regs + GRSTCTL);
130 if (++count > 50) {
131 dev_warn(hsotg->dev,
132 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
133 __func__, greset);
134 return;
135 }
136 } while (!(greset & GRSTCTL_AHBIDLE));
137
138 /* Core Soft Reset */
139 count = 0;
140 greset |= GRSTCTL_CSFTRST;
141 writel(greset, hsotg->regs + GRSTCTL);
142 do {
143 usleep_range(20000, 40000);
144 greset = readl(hsotg->regs + GRSTCTL);
145 if (++count > 50) {
146 dev_warn(hsotg->dev,
147 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
148 __func__, greset);
149 break;
150 }
151 } while (greset & GRSTCTL_CSFTRST);
152
153 /*
154 * NOTE: This long sleep is _very_ important, otherwise the core will
155 * not stay in host mode after a connector ID change!
156 */
157 usleep_range(150000, 200000);
158}
159
160static void dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
161{
162 u32 usbcfg, i2cctl;
163
164 /*
165 * core_init() is now called on every switch so only call the
166 * following for the first time through
167 */
168 if (select_phy) {
169 dev_dbg(hsotg->dev, "FS PHY selected\n");
170 usbcfg = readl(hsotg->regs + GUSBCFG);
171 usbcfg |= GUSBCFG_PHYSEL;
172 writel(usbcfg, hsotg->regs + GUSBCFG);
173
174 /* Reset after a PHY select */
175 dwc2_core_reset(hsotg);
176 }
177
178 /*
179 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
180 * do this on HNP Dev/Host mode switches (done in dev_init and
181 * host_init).
182 */
183 if (dwc2_is_host_mode(hsotg))
184 dwc2_init_fs_ls_pclk_sel(hsotg);
185
186 if (hsotg->core_params->i2c_enable > 0) {
187 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
188
189 /* Program GUSBCFG.OtgUtmiFsSel to I2C */
190 usbcfg = readl(hsotg->regs + GUSBCFG);
191 usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
192 writel(usbcfg, hsotg->regs + GUSBCFG);
193
194 /* Program GI2CCTL.I2CEn */
195 i2cctl = readl(hsotg->regs + GI2CCTL);
196 i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
197 i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
198 i2cctl &= ~GI2CCTL_I2CEN;
199 writel(i2cctl, hsotg->regs + GI2CCTL);
200 i2cctl |= GI2CCTL_I2CEN;
201 writel(i2cctl, hsotg->regs + GI2CCTL);
202 }
203}
204
205static void dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
206{
207 u32 usbcfg;
208
209 if (!select_phy)
210 return;
211
212 usbcfg = readl(hsotg->regs + GUSBCFG);
213
214 /*
215 * HS PHY parameters. These parameters are preserved during soft reset
216 * so only program the first time. Do a soft reset immediately after
217 * setting phyif.
218 */
219 switch (hsotg->core_params->phy_type) {
220 case DWC2_PHY_TYPE_PARAM_ULPI:
221 /* ULPI interface */
222 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
223 usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
224 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
225 if (hsotg->core_params->phy_ulpi_ddr > 0)
226 usbcfg |= GUSBCFG_DDRSEL;
227 break;
228 case DWC2_PHY_TYPE_PARAM_UTMI:
229 /* UTMI+ interface */
230 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
231 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
232 if (hsotg->core_params->phy_utmi_width == 16)
233 usbcfg |= GUSBCFG_PHYIF16;
234 break;
235 default:
236 dev_err(hsotg->dev, "FS PHY selected at HS!\n");
237 break;
238 }
239
240 writel(usbcfg, hsotg->regs + GUSBCFG);
241
242 /* Reset after setting the PHY parameters */
243 dwc2_core_reset(hsotg);
244}
245
246static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
247{
248 u32 usbcfg, hs_phy_type, fs_phy_type;
249
250 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
251 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
252 /* If FS mode with FS PHY */
253 dwc2_fs_phy_init(hsotg, select_phy);
254 } else {
255 /* High speed PHY */
256 dwc2_hs_phy_init(hsotg, select_phy);
257 }
258
259 hs_phy_type = hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK;
260 fs_phy_type = hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK;
261
262 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
263 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
264 hsotg->core_params->ulpi_fs_ls > 0) {
265 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
266 usbcfg = readl(hsotg->regs + GUSBCFG);
267 usbcfg |= GUSBCFG_ULPI_FS_LS;
268 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
269 writel(usbcfg, hsotg->regs + GUSBCFG);
270 } else {
271 usbcfg = readl(hsotg->regs + GUSBCFG);
272 usbcfg &= ~GUSBCFG_ULPI_FS_LS;
273 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
274 writel(usbcfg, hsotg->regs + GUSBCFG);
275 }
276}
277
278static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
279{
280 u32 ahbcfg = 0;
281
282 switch (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) {
283 case GHWCFG2_EXT_DMA_ARCH:
284 dev_err(hsotg->dev, "External DMA Mode not supported\n");
285 return -EINVAL;
286
287 case GHWCFG2_INT_DMA_ARCH:
288 dev_dbg(hsotg->dev, "Internal DMA Mode\n");
289 /*
290 * Old value was GAHBCFG_HBSTLEN_INCR - done for
291 * Host mode ISOC in issue fix - vahrama
292 */
293 ahbcfg |= GAHBCFG_HBSTLEN_INCR4;
294 break;
295
296 case GHWCFG2_SLAVE_ONLY_ARCH:
297 default:
298 dev_dbg(hsotg->dev, "Slave Only Mode\n");
299 break;
300 }
301
302 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
303 hsotg->core_params->dma_enable,
304 hsotg->core_params->dma_desc_enable);
305
306 if (hsotg->core_params->dma_enable > 0) {
307 if (hsotg->core_params->dma_desc_enable > 0)
308 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
309 else
310 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
311 } else {
312 dev_dbg(hsotg->dev, "Using Slave mode\n");
313 hsotg->core_params->dma_desc_enable = 0;
314 }
315
316 if (hsotg->core_params->ahb_single > 0)
317 ahbcfg |= GAHBCFG_AHB_SINGLE;
318
319 if (hsotg->core_params->dma_enable > 0)
320 ahbcfg |= GAHBCFG_DMA_EN;
321
322 writel(ahbcfg, hsotg->regs + GAHBCFG);
323
324 return 0;
325}
326
327static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
328{
329 u32 usbcfg;
330
331 usbcfg = readl(hsotg->regs + GUSBCFG);
332 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
333
334 switch (hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK) {
335 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
336 if (hsotg->core_params->otg_cap ==
337 DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
338 usbcfg |= GUSBCFG_HNPCAP;
339 if (hsotg->core_params->otg_cap !=
340 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
341 usbcfg |= GUSBCFG_SRPCAP;
342 break;
343
344 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
345 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
346 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
347 if (hsotg->core_params->otg_cap !=
348 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
349 usbcfg |= GUSBCFG_SRPCAP;
350 break;
351
352 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
353 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
354 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
355 default:
356 break;
357 }
358
359 writel(usbcfg, hsotg->regs + GUSBCFG);
360}
361
362/**
363 * dwc2_core_init() - Initializes the DWC_otg controller registers and
364 * prepares the core for device mode or host mode operation
365 *
366 * @hsotg: Programming view of the DWC_otg controller
367 * @select_phy: If true then also set the Phy type
368 * @irq: If >= 0, the irq to register
369 */
370int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
371{
372 u32 usbcfg, otgctl;
373 int retval;
374
375 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
376
377 usbcfg = readl(hsotg->regs + GUSBCFG);
378
379 /* Set ULPI External VBUS bit if needed */
380 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
381 if (hsotg->core_params->phy_ulpi_ext_vbus ==
382 DWC2_PHY_ULPI_EXTERNAL_VBUS)
383 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
384
385 /* Set external TS Dline pulsing bit if needed */
386 usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
387 if (hsotg->core_params->ts_dline > 0)
388 usbcfg |= GUSBCFG_TERMSELDLPULSE;
389
390 writel(usbcfg, hsotg->regs + GUSBCFG);
391
392 /* Reset the Controller */
393 dwc2_core_reset(hsotg);
394
395 dev_dbg(hsotg->dev, "num_dev_perio_in_ep=%d\n",
396 hsotg->hwcfg4 >> GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT &
397 GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK >>
398 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT);
399
400 hsotg->total_fifo_size = hsotg->hwcfg3 >> GHWCFG3_DFIFO_DEPTH_SHIFT &
401 GHWCFG3_DFIFO_DEPTH_MASK >> GHWCFG3_DFIFO_DEPTH_SHIFT;
402 hsotg->rx_fifo_size = readl(hsotg->regs + GRXFSIZ);
403 hsotg->nperio_tx_fifo_size =
404 readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
405
406 dev_dbg(hsotg->dev, "Total FIFO SZ=%d\n", hsotg->total_fifo_size);
407 dev_dbg(hsotg->dev, "RxFIFO SZ=%d\n", hsotg->rx_fifo_size);
408 dev_dbg(hsotg->dev, "NP TxFIFO SZ=%d\n", hsotg->nperio_tx_fifo_size);
409
410 /*
411 * This needs to happen in FS mode before any other programming occurs
412 */
413 dwc2_phy_init(hsotg, select_phy);
414
415 /* Program the GAHBCFG Register */
416 retval = dwc2_gahbcfg_init(hsotg);
417 if (retval)
418 return retval;
419
420 /* Program the GUSBCFG register */
421 dwc2_gusbcfg_init(hsotg);
422
423 /* Program the GOTGCTL register */
424 otgctl = readl(hsotg->regs + GOTGCTL);
425 otgctl &= ~GOTGCTL_OTGVER;
426 if (hsotg->core_params->otg_ver > 0)
427 otgctl |= GOTGCTL_OTGVER;
428 writel(otgctl, hsotg->regs + GOTGCTL);
429 dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
430
431 /* Clear the SRP success bit for FS-I2C */
432 hsotg->srp_success = 0;
433
434 if (irq >= 0) {
435 dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
436 irq);
437 retval = devm_request_irq(hsotg->dev, irq,
438 dwc2_handle_common_intr, IRQF_SHARED,
439 dev_name(hsotg->dev), hsotg);
440 if (retval)
441 return retval;
442 }
443
444 /* Enable common interrupts */
445 dwc2_enable_common_interrupts(hsotg);
446
447 /*
448 * Do device or host initialization based on mode during PCD and
449 * HCD initialization
450 */
451 if (dwc2_is_host_mode(hsotg)) {
452 dev_dbg(hsotg->dev, "Host Mode\n");
453 hsotg->op_state = OTG_STATE_A_HOST;
454 } else {
455 dev_dbg(hsotg->dev, "Device Mode\n");
456 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
457 }
458
459 return 0;
460}
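/*
 * Illustrative usage sketch (an assumption, not part of this file): the
 * bus-glue probe code is expected to map the registers and then call
 * dwc2_core_init() with the platform IRQ so the common interrupt handler
 * gets registered, roughly like this:
 *
 *	retval = dwc2_core_init(hsotg, true, irq);
 *	if (retval)
 *		return retval;		// core failed to initialize
 *	if (dwc2_is_host_mode(hsotg))
 *		dwc2_core_host_init(hsotg);
 *
 * The exact call sequence lives in the platform/PCI glue and the HCD code,
 * so treat this only as an outline.
 */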
461
462/**
463 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
464 *
465 * @hsotg: Programming view of DWC_otg controller
466 */
467void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
468{
469 u32 intmsk;
470
471 dev_dbg(hsotg->dev, "%s()\n", __func__);
472
473 /* Disable all interrupts */
474 writel(0, hsotg->regs + GINTMSK);
475 writel(0, hsotg->regs + HAINTMSK);
476
477 /* Clear any pending interrupts */
478 writel(0xffffffff, hsotg->regs + GINTSTS);
479
480 /* Enable the common interrupts */
481 dwc2_enable_common_interrupts(hsotg);
482
483 /* Enable host mode interrupts without disturbing common interrupts */
484 intmsk = readl(hsotg->regs + GINTMSK);
485 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
486 writel(intmsk, hsotg->regs + GINTMSK);
487}
488
489/**
490 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
491 *
492 * @hsotg: Programming view of DWC_otg controller
493 */
494void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
495{
496 u32 intmsk = readl(hsotg->regs + GINTMSK);
497
498 /* Disable host mode interrupts without disturbing common interrupts */
499 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
500 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
501 writel(intmsk, hsotg->regs + GINTMSK);
502}
503
504static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
505{
506 struct dwc2_core_params *params = hsotg->core_params;
507 u32 rxfsiz, nptxfsiz, ptxfsiz, hptxfsiz, dfifocfg;
508
509 if (!(hsotg->hwcfg2 & GHWCFG2_DYNAMIC_FIFO) ||
510 !params->enable_dynamic_fifo)
511 return;
512
513 dev_dbg(hsotg->dev, "Total FIFO Size=%d\n", hsotg->total_fifo_size);
514 dev_dbg(hsotg->dev, "Rx FIFO Size=%d\n", params->host_rx_fifo_size);
515 dev_dbg(hsotg->dev, "NP Tx FIFO Size=%d\n",
516 params->host_nperio_tx_fifo_size);
517 dev_dbg(hsotg->dev, "P Tx FIFO Size=%d\n",
518 params->host_perio_tx_fifo_size);
519
520 /* Rx FIFO */
521 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n",
522 readl(hsotg->regs + GRXFSIZ));
523 writel(params->host_rx_fifo_size, hsotg->regs + GRXFSIZ);
524 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));
525
526 /* Non-periodic Tx FIFO */
527 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
528 readl(hsotg->regs + GNPTXFSIZ));
529 nptxfsiz = params->host_nperio_tx_fifo_size <<
530 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
531 nptxfsiz |= params->host_rx_fifo_size <<
532 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
533 writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
534 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
535 readl(hsotg->regs + GNPTXFSIZ));
536
537 /* Periodic Tx FIFO */
538 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
539 readl(hsotg->regs + HPTXFSIZ));
540 ptxfsiz = params->host_perio_tx_fifo_size <<
541 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
542 ptxfsiz |= (params->host_rx_fifo_size +
543 params->host_nperio_tx_fifo_size) <<
544 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
545 writel(ptxfsiz, hsotg->regs + HPTXFSIZ);
546 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
547 readl(hsotg->regs + HPTXFSIZ));
548
549 if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
550 hsotg->snpsid <= DWC2_CORE_REV_2_94a) {
551 /*
552 * Global DFIFOCFG calculation for Host mode -
553 * include RxFIFO, NPTXFIFO and HPTXFIFO
554 */
555 dfifocfg = readl(hsotg->regs + GDFIFOCFG);
556 rxfsiz = readl(hsotg->regs + GRXFSIZ) & 0x0000ffff;
557 nptxfsiz = readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
558 hptxfsiz = readl(hsotg->regs + HPTXFSIZ) >> 16 & 0xffff;
559 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
560 dfifocfg |= (rxfsiz + nptxfsiz + hptxfsiz) <<
561 GDFIFOCFG_EPINFOBASE_SHIFT &
562 GDFIFOCFG_EPINFOBASE_MASK;
563 writel(dfifocfg, hsotg->regs + GDFIFOCFG);
564 }
565}
566
567/**
568 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
569 * Host mode
570 *
571 * @hsotg: Programming view of DWC_otg controller
572 *
573 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
574 * request queues. Host channels are reset to ensure that they are ready for
575 * performing transfers.
576 */
577void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
578{
579 u32 hcfg, hfir, otgctl;
580
581 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
582
583 /* Restart the Phy Clock */
584 writel(0, hsotg->regs + PCGCTL);
585
586 /* Initialize Host Configuration Register */
587 dwc2_init_fs_ls_pclk_sel(hsotg);
588 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
589 hcfg = readl(hsotg->regs + HCFG);
590 hcfg |= HCFG_FSLSSUPP;
591 writel(hcfg, hsotg->regs + HCFG);
592 }
593
594 /*
595 * This bit allows dynamic reloading of the HFIR register during
596 * runtime. This bit needs to be programmed during initial configuration
597 * and its value must not be changed during runtime.
598 */
599 if (hsotg->core_params->reload_ctl > 0) {
600 hfir = readl(hsotg->regs + HFIR);
601 hfir |= HFIR_RLDCTRL;
602 writel(hfir, hsotg->regs + HFIR);
603 }
604
605 if (hsotg->core_params->dma_desc_enable > 0) {
606 u32 op_mode = hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK;
607
608 if (hsotg->snpsid < DWC2_CORE_REV_2_90a ||
609 !(hsotg->hwcfg4 & GHWCFG4_DESC_DMA) ||
610 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
611 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
612 op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
613 dev_err(hsotg->dev,
614 "Hardware does not support descriptor DMA mode -\n");
615 dev_err(hsotg->dev,
616 "falling back to buffer DMA mode.\n");
617 hsotg->core_params->dma_desc_enable = 0;
618 } else {
619 hcfg = readl(hsotg->regs + HCFG);
620 hcfg |= HCFG_DESCDMA;
621 writel(hcfg, hsotg->regs + HCFG);
622 }
623 }
624
625 /* Configure data FIFO sizes */
626 dwc2_config_fifos(hsotg);
627
628 /* TODO - check this */
629 /* Clear Host Set HNP Enable in the OTG Control Register */
630 otgctl = readl(hsotg->regs + GOTGCTL);
631 otgctl &= ~GOTGCTL_HSTSETHNPEN;
632 writel(otgctl, hsotg->regs + GOTGCTL);
633
634 /* Make sure the FIFOs are flushed */
635 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
636 dwc2_flush_rx_fifo(hsotg);
637
638 /* Clear Host Set HNP Enable in the OTG Control Register */
639 otgctl = readl(hsotg->regs + GOTGCTL);
640 otgctl &= ~GOTGCTL_HSTSETHNPEN;
641 writel(otgctl, hsotg->regs + GOTGCTL);
642
643 if (hsotg->core_params->dma_desc_enable <= 0) {
644 int num_channels, i;
645 u32 hcchar;
646
647 /* Flush out any leftover queued requests */
648 num_channels = hsotg->core_params->host_channels;
649 for (i = 0; i < num_channels; i++) {
650 hcchar = readl(hsotg->regs + HCCHAR(i));
651 hcchar &= ~HCCHAR_CHENA;
652 hcchar |= HCCHAR_CHDIS;
653 hcchar &= ~HCCHAR_EPDIR;
654 writel(hcchar, hsotg->regs + HCCHAR(i));
655 }
656
657 /* Halt all channels to put them into a known state */
658 for (i = 0; i < num_channels; i++) {
659 int count = 0;
660
661 hcchar = readl(hsotg->regs + HCCHAR(i));
662 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
663 hcchar &= ~HCCHAR_EPDIR;
664 writel(hcchar, hsotg->regs + HCCHAR(i));
665 dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
666 __func__, i);
667 do {
668 hcchar = readl(hsotg->regs + HCCHAR(i));
669 if (++count > 1000) {
670 dev_err(hsotg->dev,
671 "Unable to clear enable on channel %d\n",
672 i);
673 break;
674 }
675 udelay(1);
676 } while (hcchar & HCCHAR_CHENA);
677 }
678 }
679
680 /* Turn on the vbus power */
681 dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
682 if (hsotg->op_state == OTG_STATE_A_HOST) {
683 u32 hprt0 = dwc2_read_hprt0(hsotg);
684
685 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
686 !!(hprt0 & HPRT0_PWR));
687 if (!(hprt0 & HPRT0_PWR)) {
688 hprt0 |= HPRT0_PWR;
689 writel(hprt0, hsotg->regs + HPRT0);
690 }
691 }
692
693 dwc2_enable_host_interrupts(hsotg);
694}
695
696static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
697 struct dwc2_host_chan *chan)
698{
699 u32 hcintmsk = HCINTMSK_CHHLTD;
700
701 switch (chan->ep_type) {
702 case USB_ENDPOINT_XFER_CONTROL:
703 case USB_ENDPOINT_XFER_BULK:
704 dev_vdbg(hsotg->dev, "control/bulk\n");
705 hcintmsk |= HCINTMSK_XFERCOMPL;
706 hcintmsk |= HCINTMSK_STALL;
707 hcintmsk |= HCINTMSK_XACTERR;
708 hcintmsk |= HCINTMSK_DATATGLERR;
709 if (chan->ep_is_in) {
710 hcintmsk |= HCINTMSK_BBLERR;
711 } else {
712 hcintmsk |= HCINTMSK_NAK;
713 hcintmsk |= HCINTMSK_NYET;
714 if (chan->do_ping)
715 hcintmsk |= HCINTMSK_ACK;
716 }
717
718 if (chan->do_split) {
719 hcintmsk |= HCINTMSK_NAK;
720 if (chan->complete_split)
721 hcintmsk |= HCINTMSK_NYET;
722 else
723 hcintmsk |= HCINTMSK_ACK;
724 }
725
726 if (chan->error_state)
727 hcintmsk |= HCINTMSK_ACK;
728 break;
729
730 case USB_ENDPOINT_XFER_INT:
731 if (dbg_perio())
732 dev_vdbg(hsotg->dev, "intr\n");
733 hcintmsk |= HCINTMSK_XFERCOMPL;
734 hcintmsk |= HCINTMSK_NAK;
735 hcintmsk |= HCINTMSK_STALL;
736 hcintmsk |= HCINTMSK_XACTERR;
737 hcintmsk |= HCINTMSK_DATATGLERR;
738 hcintmsk |= HCINTMSK_FRMOVRUN;
739
740 if (chan->ep_is_in)
741 hcintmsk |= HCINTMSK_BBLERR;
742 if (chan->error_state)
743 hcintmsk |= HCINTMSK_ACK;
744 if (chan->do_split) {
745 if (chan->complete_split)
746 hcintmsk |= HCINTMSK_NYET;
747 else
748 hcintmsk |= HCINTMSK_ACK;
749 }
750 break;
751
752 case USB_ENDPOINT_XFER_ISOC:
753 if (dbg_perio())
754 dev_vdbg(hsotg->dev, "isoc\n");
755 hcintmsk |= HCINTMSK_XFERCOMPL;
756 hcintmsk |= HCINTMSK_FRMOVRUN;
757 hcintmsk |= HCINTMSK_ACK;
758
759 if (chan->ep_is_in) {
760 hcintmsk |= HCINTMSK_XACTERR;
761 hcintmsk |= HCINTMSK_BBLERR;
762 }
763 break;
764 default:
765 dev_err(hsotg->dev, "## Unknown EP type ##\n");
766 break;
767 }
768
769 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
770 if (dbg_hc(chan))
771 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
772}
773
774static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
775 struct dwc2_host_chan *chan)
776{
777 u32 hcintmsk = HCINTMSK_CHHLTD;
778
779 /*
780 * For Descriptor DMA mode core halts the channel on AHB error.
781 * Interrupt is not required.
782 */
783 if (hsotg->core_params->dma_desc_enable <= 0) {
784 if (dbg_hc(chan))
785 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
786 hcintmsk |= HCINTMSK_AHBERR;
787 } else {
788 if (dbg_hc(chan))
789 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
790 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
791 hcintmsk |= HCINTMSK_XFERCOMPL;
792 }
793
794 if (chan->error_state && !chan->do_split &&
795 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
796 if (dbg_hc(chan))
797 dev_vdbg(hsotg->dev, "setting ACK\n");
798 hcintmsk |= HCINTMSK_ACK;
799 if (chan->ep_is_in) {
800 hcintmsk |= HCINTMSK_DATATGLERR;
801 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
802 hcintmsk |= HCINTMSK_NAK;
803 }
804 }
805
806 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
807 if (dbg_hc(chan))
808 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
809}
810
811static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
812 struct dwc2_host_chan *chan)
813{
814 u32 intmsk;
815
816 if (hsotg->core_params->dma_enable > 0) {
817 if (dbg_hc(chan))
818 dev_vdbg(hsotg->dev, "DMA enabled\n");
819 dwc2_hc_enable_dma_ints(hsotg, chan);
820 } else {
821 if (dbg_hc(chan))
822 dev_vdbg(hsotg->dev, "DMA disabled\n");
823 dwc2_hc_enable_slave_ints(hsotg, chan);
824 }
825
826 /* Enable the top level host channel interrupt */
827 intmsk = readl(hsotg->regs + HAINTMSK);
828 intmsk |= 1 << chan->hc_num;
829 writel(intmsk, hsotg->regs + HAINTMSK);
830 if (dbg_hc(chan))
831 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
832
833 /* Make sure host channel interrupts are enabled */
834 intmsk = readl(hsotg->regs + GINTMSK);
835 intmsk |= GINTSTS_HCHINT;
836 writel(intmsk, hsotg->regs + GINTMSK);
837 if (dbg_hc(chan))
838 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
839}
840
841/**
842 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
843 * a specific endpoint
844 *
845 * @hsotg: Programming view of DWC_otg controller
846 * @chan: Information needed to initialize the host channel
847 *
848 * The HCCHARn register is set up with the characteristics specified in chan.
849 * Host channel interrupts that may need to be serviced while this transfer is
850 * in progress are enabled.
851 */
852void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
853{
854 u8 hc_num = chan->hc_num;
855 u32 hcintmsk;
856 u32 hcchar;
857 u32 hcsplt = 0;
858
859 if (dbg_hc(chan))
860 dev_vdbg(hsotg->dev, "%s()\n", __func__);
861
862 /* Clear old interrupt conditions for this host channel */
863 hcintmsk = 0xffffffff;
864 hcintmsk &= ~HCINTMSK_RESERVED14_31;
865 writel(hcintmsk, hsotg->regs + HCINT(hc_num));
866
867 /* Enable channel interrupts required for this transfer */
868 dwc2_hc_enable_ints(hsotg, chan);
869
870 /*
871 * Program the HCCHARn register with the endpoint characteristics for
872 * the current transfer
873 */
874 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
875 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
876 if (chan->ep_is_in)
877 hcchar |= HCCHAR_EPDIR;
878 if (chan->speed == USB_SPEED_LOW)
879 hcchar |= HCCHAR_LSPDDEV;
880 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
881 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
882 writel(hcchar, hsotg->regs + HCCHAR(hc_num));
883 if (dbg_hc(chan)) {
884 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
885 hc_num, hcchar);
886
887 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, hc_num);
888 dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
889 hcchar >> HCCHAR_DEVADDR_SHIFT &
890 HCCHAR_DEVADDR_MASK >> HCCHAR_DEVADDR_SHIFT);
891 dev_vdbg(hsotg->dev, " Ep Num: %d\n",
892 hcchar >> HCCHAR_EPNUM_SHIFT &
893 HCCHAR_EPNUM_MASK >> HCCHAR_EPNUM_SHIFT);
894 dev_vdbg(hsotg->dev, " Is In: %d\n",
895 !!(hcchar & HCCHAR_EPDIR));
896 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
897 !!(hcchar & HCCHAR_LSPDDEV));
898 dev_vdbg(hsotg->dev, " Ep Type: %d\n",
899 hcchar >> HCCHAR_EPTYPE_SHIFT &
900 HCCHAR_EPTYPE_MASK >> HCCHAR_EPTYPE_SHIFT);
901 dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
902 hcchar >> HCCHAR_MPS_SHIFT &
903 HCCHAR_MPS_MASK >> HCCHAR_MPS_SHIFT);
904 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
905 hcchar >> HCCHAR_MULTICNT_SHIFT &
906 HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
907 }
908
909 /* Program the HCSPLT register for SPLITs */
910 if (chan->do_split) {
911 if (dbg_hc(chan))
912 dev_vdbg(hsotg->dev,
913 "Programming HC %d with split --> %s\n",
914 hc_num,
915 chan->complete_split ? "CSPLIT" : "SSPLIT");
916 if (chan->complete_split)
917 hcsplt |= HCSPLT_COMPSPLT;
918 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
919 HCSPLT_XACTPOS_MASK;
920 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
921 HCSPLT_HUBADDR_MASK;
922 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
923 HCSPLT_PRTADDR_MASK;
924 if (dbg_hc(chan)) {
925 dev_vdbg(hsotg->dev, " comp split %d\n",
926 chan->complete_split);
927 dev_vdbg(hsotg->dev, " xact pos %d\n",
928 chan->xact_pos);
929 dev_vdbg(hsotg->dev, " hub addr %d\n",
930 chan->hub_addr);
931 dev_vdbg(hsotg->dev, " hub port %d\n",
932 chan->hub_port);
933 dev_vdbg(hsotg->dev, " is_in %d\n",
934 chan->ep_is_in);
935 dev_vdbg(hsotg->dev, " Max Pkt %d\n",
936 hcchar >> HCCHAR_MPS_SHIFT &
937 HCCHAR_MPS_MASK >> HCCHAR_MPS_SHIFT);
938 dev_vdbg(hsotg->dev, " xferlen %d\n",
939 chan->xfer_len);
940 }
941 }
942
943 writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
944}
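/*
 * Illustrative sketch (assumed caller, not part of this file): the HCD
 * scheduler fills in a struct dwc2_host_chan from the URB/QH state before
 * handing it to dwc2_hc_init() and dwc2_hc_start_transfer(). The field names
 * below are the ones this file programs into HCCHAR/HCTSIZ; the helper calls
 * on the right are only an assumed way of deriving them:
 *
 *	chan->hc_num      = free_channel_number;
 *	chan->dev_addr    = usb_pipedevice(urb->pipe);
 *	chan->ep_num      = usb_pipeendpoint(urb->pipe);
 *	chan->ep_is_in    = !!usb_pipein(urb->pipe);
 *	chan->ep_type     = USB_ENDPOINT_XFER_BULK;	// for a bulk URB
 *	chan->max_packet  = usb_maxpacket(urb->dev, urb->pipe, !chan->ep_is_in);
 *	chan->multi_count = 1;
 *	chan->xfer_len    = urb->transfer_buffer_length;
 *	chan->xfer_dma    = urb->transfer_dma;		// buffer DMA mode
 *
 *	dwc2_hc_init(hsotg, chan);
 *	dwc2_hc_start_transfer(hsotg, chan);
 */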
945
946/**
947 * dwc2_hc_halt() - Attempts to halt a host channel
948 *
949 * @hsotg: Controller register interface
950 * @chan: Host channel to halt
951 * @halt_status: Reason for halting the channel
952 *
953 * This function should only be called in Slave mode or to abort a transfer in
954 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
955 * controller halts the channel when the transfer is complete or a condition
956 * occurs that requires application intervention.
957 *
958 * In slave mode, checks for a free request queue entry, then sets the Channel
959 * Enable and Channel Disable bits of the Host Channel Characteristics
960 * register of the specified channel to initiate the halt. If there is no free
961 * request queue entry, sets only the Channel Disable bit of the HCCHARn
962 * register to flush requests for this channel. In the latter case, sets a
963 * flag to indicate that the host channel needs to be halted when a request
964 * queue slot is open.
965 *
966 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
967 * HCCHARn register. The controller ensures there is space in the request
968 * queue before submitting the halt request.
969 *
970 * Some time may elapse before the core flushes any posted requests for this
971 * host channel and halts. The Channel Halted interrupt handler completes the
972 * deactivation of the host channel.
973 */
974void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
975 enum dwc2_halt_status halt_status)
976{
977 u32 nptxsts, hptxsts, hcchar;
978
979 if (dbg_hc(chan))
980 dev_vdbg(hsotg->dev, "%s()\n", __func__);
981 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
982 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
983
984 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
985 halt_status == DWC2_HC_XFER_AHB_ERR) {
986 /*
987 * Disable all channel interrupts except Ch Halted. The QTD
988 * and QH state associated with this transfer has been cleared
989 * (in the case of URB_DEQUEUE), so the channel needs to be
990 * shut down carefully to prevent crashes.
991 */
992 u32 hcintmsk = HCINTMSK_CHHLTD;
993
994 dev_vdbg(hsotg->dev, "dequeue/error\n");
995 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
996
997 /*
998 * Make sure no other interrupts besides halt are currently
999 * pending. Handling another interrupt could cause a crash due
1000 * to the QTD and QH state.
1001 */
1002 writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1003
1004 /*
1005 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1006 * even if the channel was already halted for some other
1007 * reason
1008 */
1009 chan->halt_status = halt_status;
1010
1011 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1012 if (!(hcchar & HCCHAR_CHENA)) {
1013 /*
1014 * The channel is either already halted or it hasn't
1015 * started yet. In DMA mode, the transfer may halt if
1016 * it finishes normally or a condition occurs that
1017 * requires driver intervention. Don't want to halt
1018 * the channel again. In either Slave or DMA mode,
1019 * it's possible that the transfer has been assigned
1020 * to a channel, but not started yet when an URB is
1021 * dequeued. Don't want to halt a channel that hasn't
1022 * started yet.
1023 */
1024 return;
1025 }
1026 }
1027 if (chan->halt_pending) {
1028 /*
1029 * A halt has already been issued for this channel. This might
1030 * happen when a transfer is aborted by a higher level in
1031 * the stack.
1032 */
1033 dev_vdbg(hsotg->dev,
1034 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1035 __func__, chan->hc_num);
1036 return;
1037 }
1038
1039 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1040
1041 /* No need to set the bit in DDMA for disabling the channel */
1042 /* TODO check it everywhere channel is disabled */
1043 if (hsotg->core_params->dma_desc_enable <= 0) {
1044 if (dbg_hc(chan))
1045 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1046 hcchar |= HCCHAR_CHENA;
1047 } else {
1048 if (dbg_hc(chan))
1049 dev_dbg(hsotg->dev, "desc DMA enabled\n");
1050 }
1051 hcchar |= HCCHAR_CHDIS;
1052
1053 if (hsotg->core_params->dma_enable <= 0) {
1054 if (dbg_hc(chan))
1055 dev_vdbg(hsotg->dev, "DMA not enabled\n");
1056 hcchar |= HCCHAR_CHENA;
1057
1058 /* Check for space in the request queue to issue the halt */
1059 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1060 chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1061 dev_vdbg(hsotg->dev, "control/bulk\n");
1062 nptxsts = readl(hsotg->regs + GNPTXSTS);
1063 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1064 dev_vdbg(hsotg->dev, "Disabling channel\n");
1065 hcchar &= ~HCCHAR_CHENA;
1066 }
1067 } else {
1068 if (dbg_perio())
1069 dev_vdbg(hsotg->dev, "isoc/intr\n");
1070 hptxsts = readl(hsotg->regs + HPTXSTS);
1071 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1072 hsotg->queuing_high_bandwidth) {
1073 if (dbg_perio())
1074 dev_vdbg(hsotg->dev, "Disabling channel\n");
1075 hcchar &= ~HCCHAR_CHENA;
1076 }
1077 }
1078 } else {
1079 if (dbg_hc(chan))
1080 dev_vdbg(hsotg->dev, "DMA enabled\n");
1081 }
1082
1083 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1084 chan->halt_status = halt_status;
1085
1086 if (hcchar & HCCHAR_CHENA) {
1087 if (dbg_hc(chan))
1088 dev_vdbg(hsotg->dev, "Channel enabled\n");
1089 chan->halt_pending = 1;
1090 chan->halt_on_queue = 0;
1091 } else {
1092 if (dbg_hc(chan))
1093 dev_vdbg(hsotg->dev, "Channel disabled\n");
1094 chan->halt_on_queue = 1;
1095 }
1096
1097 if (dbg_hc(chan)) {
1098 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1099 chan->hc_num);
1100 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
1101 hcchar);
1102 dev_vdbg(hsotg->dev, " halt_pending: %d\n",
1103 chan->halt_pending);
1104 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
1105 chan->halt_on_queue);
1106 dev_vdbg(hsotg->dev, " halt_status: %d\n",
1107 chan->halt_status);
1108 }
1109}
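/*
 * Illustrative sketch (assumption): a typical abort path in the HCD, e.g.
 * URB dequeue, halts the channel and lets the Channel Halted interrupt
 * handler finish the teardown:
 *
 *	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_URB_DEQUEUE);
 *	// remaining cleanup happens in the CHHLTD handler, e.g. via
 *	// dwc2_hc_cleanup()
 */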
1110
1111/**
1112 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1113 *
1114 * @hsotg: Programming view of DWC_otg controller
1115 * @chan: Identifies the host channel to clean up
1116 *
1117 * This function is normally called after a transfer is done and the host
1118 * channel is being released
1119 */
1120void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1121{
1122 u32 hcintmsk;
1123
1124 chan->xfer_started = 0;
1125
1126 /*
1127 * Clear channel interrupt enables and any unhandled channel interrupt
1128 * conditions
1129 */
1130 writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1131 hcintmsk = 0xffffffff;
1132 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1133 writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1134}
1135
1136/**
1137 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1138 * which frame a periodic transfer should occur
1139 *
1140 * @hsotg: Programming view of DWC_otg controller
1141 * @chan: Identifies the host channel to set up and its properties
1142 * @hcchar: Current value of the HCCHAR register for the specified host channel
1143 *
1144 * This function has no effect on non-periodic transfers
1145 */
1146static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1147 struct dwc2_host_chan *chan, u32 *hcchar)
1148{
1149 u32 hfnum, frnum;
1150
1151 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1152 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1153 hfnum = readl(hsotg->regs + HFNUM);
1154 frnum = hfnum >> HFNUM_FRNUM_SHIFT &
1155 HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT;
1156
1157 /* 1 if _next_ frame is odd, 0 if it's even */
1158 if (frnum & 0x1)
1159 *hcchar |= HCCHAR_ODDFRM;
1160 }
1161}
1162
1163static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1164{
1165 /* Set up the initial PID for the transfer */
1166 if (chan->speed == USB_SPEED_HIGH) {
1167 if (chan->ep_is_in) {
1168 if (chan->multi_count == 1)
1169 chan->data_pid_start = DWC2_HC_PID_DATA0;
1170 else if (chan->multi_count == 2)
1171 chan->data_pid_start = DWC2_HC_PID_DATA1;
1172 else
1173 chan->data_pid_start = DWC2_HC_PID_DATA2;
1174 } else {
1175 if (chan->multi_count == 1)
1176 chan->data_pid_start = DWC2_HC_PID_DATA0;
1177 else
1178 chan->data_pid_start = DWC2_HC_PID_MDATA;
1179 }
1180 } else {
1181 chan->data_pid_start = DWC2_HC_PID_DATA0;
1182 }
1183}
1184
1185/**
1186 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1187 * the Host Channel
1188 *
1189 * @hsotg: Programming view of DWC_otg controller
1190 * @chan: Information needed to initialize the host channel
1191 *
1192 * This function should only be called in Slave mode. For a channel associated
1193 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1194 * associated with a periodic EP, the periodic Tx FIFO is written.
1195 *
1196 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1197 * the number of bytes written to the Tx FIFO.
1198 */
1199static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1200 struct dwc2_host_chan *chan)
1201{
1202 u32 i;
1203 u32 remaining_count;
1204 u32 byte_count;
1205 u32 dword_count;
1206 u32 __iomem *data_fifo;
1207 u32 *data_buf = (u32 *)chan->xfer_buf;
1208
1209 if (dbg_hc(chan))
1210 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1211
1212 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1213
1214 remaining_count = chan->xfer_len - chan->xfer_count;
1215 if (remaining_count > chan->max_packet)
1216 byte_count = chan->max_packet;
1217 else
1218 byte_count = remaining_count;
1219
1220 dword_count = (byte_count + 3) / 4;
1221
1222 if (((unsigned long)data_buf & 0x3) == 0) {
1223 /* xfer_buf is DWORD aligned */
1224 for (i = 0; i < dword_count; i++, data_buf++)
1225 writel(*data_buf, data_fifo);
1226 } else {
1227 /* xfer_buf is not DWORD aligned */
1228 for (i = 0; i < dword_count; i++, data_buf++) {
1229 u32 data = data_buf[0] | data_buf[1] << 8 |
1230 data_buf[2] << 16 | data_buf[3] << 24;
1231 writel(data, data_fifo);
1232 }
1233 }
1234
1235 chan->xfer_count += byte_count;
1236 chan->xfer_buf += byte_count;
1237}
1238
1239/**
1240 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1241 * channel and starts the transfer
1242 *
1243 * @hsotg: Programming view of DWC_otg controller
1244 * @chan: Information needed to initialize the host channel. The xfer_len value
1245 * may be reduced to accommodate the max widths of the XferSize and
1246 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1247 * changed to reflect the final xfer_len value.
1248 *
1249 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1250 * the caller must ensure that there is sufficient space in the request queue
1251 * and Tx Data FIFO.
1252 *
1253 * For an OUT transfer in Slave mode, it loads a data packet into the
1254 * appropriate FIFO. If necessary, additional data packets are loaded in the
1255 * Host ISR.
1256 *
1257 * For an IN transfer in Slave mode, a data packet is requested. The data
1258 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1259 * additional data packets are requested in the Host ISR.
1260 *
1261 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1262 * register along with a packet count of 1 and the channel is enabled. This
1263 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1264 * simply set to 0 since no data transfer occurs in this case.
1265 *
1266 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1267 * all the information required to perform the subsequent data transfer. In
1268 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1269 * controller performs the entire PING protocol, then starts the data
1270 * transfer.
1271 */
1272void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1273 struct dwc2_host_chan *chan)
1274{
1275 u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1276 u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1277 u32 hcchar;
1278 u32 hctsiz = 0;
1279 u16 num_packets;
1280
1281 if (dbg_hc(chan))
1282 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1283
1284 if (chan->do_ping) {
1285 if (hsotg->core_params->dma_enable <= 0) {
1286 if (dbg_hc(chan))
1287 dev_vdbg(hsotg->dev, "ping, no DMA\n");
1288 dwc2_hc_do_ping(hsotg, chan);
1289 chan->xfer_started = 1;
1290 return;
1291 } else {
1292 if (dbg_hc(chan))
1293 dev_vdbg(hsotg->dev, "ping, DMA\n");
1294 hctsiz |= TSIZ_DOPNG;
1295 }
1296 }
1297
1298 if (chan->do_split) {
1299 if (dbg_hc(chan))
1300 dev_vdbg(hsotg->dev, "split\n");
1301 num_packets = 1;
1302
1303 if (chan->complete_split && !chan->ep_is_in)
1304 /*
1305 * For CSPLIT OUT Transfer, set the size to 0 so the
1306 * core doesn't expect any data written to the FIFO
1307 */
1308 chan->xfer_len = 0;
1309 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1310 chan->xfer_len = chan->max_packet;
1311 else if (!chan->ep_is_in && chan->xfer_len > 188)
1312 chan->xfer_len = 188;
1313
1314 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1315 TSIZ_XFERSIZE_MASK;
1316 } else {
1317 if (dbg_hc(chan))
1318 dev_vdbg(hsotg->dev, "no split\n");
1319 /*
1320 * Ensure that the transfer length and packet count will fit
1321 * in the widths allocated for them in the HCTSIZn register
1322 */
1323 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1324 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1325 /*
1326 * Make sure the transfer size is no larger than one
1327 * (micro)frame's worth of data. (A check was done
1328 * when the periodic transfer was accepted to ensure
1329 * that a (micro)frame's worth of data can be
1330 * programmed into a channel.)
1331 */
1332 u32 max_periodic_len =
1333 chan->multi_count * chan->max_packet;
1334
1335 if (chan->xfer_len > max_periodic_len)
1336 chan->xfer_len = max_periodic_len;
1337 } else if (chan->xfer_len > max_hc_xfer_size) {
1338 /*
1339 * Make sure that xfer_len is a multiple of max packet
1340 * size
1341 */
1342 chan->xfer_len =
1343 max_hc_xfer_size - chan->max_packet + 1;
1344 }
1345
1346 if (chan->xfer_len > 0) {
1347 num_packets = (chan->xfer_len + chan->max_packet - 1) /
1348 chan->max_packet;
1349 if (num_packets > max_hc_pkt_count) {
1350 num_packets = max_hc_pkt_count;
1351 chan->xfer_len = num_packets * chan->max_packet;
1352 }
1353 } else {
1354 /* Need 1 packet for transfer length of 0 */
1355 num_packets = 1;
1356 }
1357
1358 if (chan->ep_is_in)
1359 /*
1360 * Always program an integral # of max packets for IN
1361 * transfers
1362 */
1363 chan->xfer_len = num_packets * chan->max_packet;
1364
1365 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1366 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1367 /*
1368 * Make sure that the multi_count field matches the
1369 * actual transfer length
1370 */
1371 chan->multi_count = num_packets;
1372
1373 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1374 dwc2_set_pid_isoc(chan);
1375
1376 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1377 TSIZ_XFERSIZE_MASK;
1378 }
1379
1380 chan->start_pkt_count = num_packets;
1381 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1382 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1383 TSIZ_SC_MC_PID_MASK;
1384 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1385 if (dbg_hc(chan)) {
1386 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1387 hctsiz, chan->hc_num);
1388
1389 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1390 chan->hc_num);
1391 dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
1392 hctsiz >> TSIZ_XFERSIZE_SHIFT &
1393 TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT);
1394 dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
1395 hctsiz >> TSIZ_PKTCNT_SHIFT &
1396 TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT);
1397 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1398 hctsiz >> TSIZ_SC_MC_PID_SHIFT &
1399 TSIZ_SC_MC_PID_MASK >> TSIZ_SC_MC_PID_SHIFT);
1400 }
1401
1402 if (hsotg->core_params->dma_enable > 0) {
1403 dma_addr_t dma_addr;
1404
1405 if (chan->align_buf) {
1406 if (dbg_hc(chan))
1407 dev_vdbg(hsotg->dev, "align_buf\n");
1408 dma_addr = chan->align_buf;
1409 } else {
1410 dma_addr = chan->xfer_dma;
1411 }
1412 writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
1413 if (dbg_hc(chan))
1414 dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1415 (unsigned long)dma_addr, chan->hc_num);
1416 }
1417
1418 /* Start the split */
1419 if (chan->do_split) {
1420 u32 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));
1421
1422 hcsplt |= HCSPLT_SPLTENA;
1423 writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
1424 }
1425
1426 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1427 hcchar &= ~HCCHAR_MULTICNT_MASK;
1428 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1429 HCCHAR_MULTICNT_MASK;
1430 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1431
1432 if (hcchar & HCCHAR_CHDIS)
1433 dev_warn(hsotg->dev,
1434 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1435 __func__, chan->hc_num, hcchar);
1436
1437 /* Set host channel enable after all other setup is complete */
1438 hcchar |= HCCHAR_CHENA;
1439 hcchar &= ~HCCHAR_CHDIS;
1440
1441 if (dbg_hc(chan))
1442 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1443 hcchar >> HCCHAR_MULTICNT_SHIFT &
1444 HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
1445
1446 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1447 if (dbg_hc(chan))
1448 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1449 chan->hc_num);
1450
1451 chan->xfer_started = 1;
1452 chan->requests++;
1453
1454 if (hsotg->core_params->dma_enable <= 0 &&
1455 !chan->ep_is_in && chan->xfer_len > 0)
1456 /* Load OUT packet into the appropriate Tx FIFO */
1457 dwc2_hc_write_packet(hsotg, chan);
1458}
1459
1460/**
1461 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1462 * host channel and starts the transfer in Descriptor DMA mode
1463 *
1464 * @hsotg: Programming view of DWC_otg controller
1465 * @chan: Information needed to initialize the host channel
1466 *
1467 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1468 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1469 * with micro-frame bitmap.
1470 *
1471 * Initializes HCDMA register with descriptor list address and CTD value then
1472 * starts the transfer via enabling the channel.
1473 */
1474void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1475 struct dwc2_host_chan *chan)
1476{
1477 u32 hcchar;
1478 u32 hc_dma;
1479 u32 hctsiz = 0;
1480
1481 if (chan->do_ping)
1482 hctsiz |= TSIZ_DOPNG;
1483
1484 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1485 dwc2_set_pid_isoc(chan);
1486
1487 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1488 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1489 TSIZ_SC_MC_PID_MASK;
1490
1491 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1492 hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1493
1494 /* Non-zero only for high-speed interrupt endpoints */
1495 hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1496
1497 if (dbg_hc(chan)) {
1498 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1499 chan->hc_num);
1500 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1501 chan->data_pid_start);
1502 dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
1503 }
1504
1505 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1506
1507 hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
1508
1509 /* Always start from first descriptor */
1510 hc_dma &= ~HCDMA_CTD_MASK;
1511 writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
1512 if (dbg_hc(chan))
1513 dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
1514 hc_dma, chan->hc_num);
1515
1516 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1517 hcchar &= ~HCCHAR_MULTICNT_MASK;
1518 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1519 HCCHAR_MULTICNT_MASK;
1520
1521 if (hcchar & HCCHAR_CHDIS)
1522 dev_warn(hsotg->dev,
1523 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1524 __func__, chan->hc_num, hcchar);
1525
1526 /* Set host channel enable after all other setup is complete */
1527 hcchar |= HCCHAR_CHENA;
1528 hcchar &= ~HCCHAR_CHDIS;
1529
1530 if (dbg_hc(chan))
1531 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1532 hcchar >> HCCHAR_MULTICNT_SHIFT &
1533 HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
1534
1535 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1536 if (dbg_hc(chan))
1537 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1538 chan->hc_num);
1539
1540 chan->xfer_started = 1;
1541 chan->requests++;
1542}
1543
1544/**
1545 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1546 * a previous call to dwc2_hc_start_transfer()
1547 *
1548 * @hsotg: Programming view of DWC_otg controller
1549 * @chan: Information needed to initialize the host channel
1550 *
1551 * The caller must ensure there is sufficient space in the request queue and Tx
1552 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1553 * the controller acts autonomously to complete transfers programmed to a host
1554 * channel.
1555 *
1556 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1557 * if there is any data remaining to be queued. For an IN transfer, another
1558 * data packet is always requested. For the SETUP phase of a control transfer,
1559 * this function does nothing.
1560 *
1561 * Return: 1 if a new request is queued, 0 if no more requests are required
1562 * for this transfer
1563 */
1564int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1565 struct dwc2_host_chan *chan)
1566{
1567 if (dbg_hc(chan))
1568 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1569 chan->hc_num);
1570
1571 if (chan->do_split)
1572 /* SPLITs always queue just once per channel */
1573 return 0;
1574
1575 if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1576 /* SETUPs are queued only once since they can't be NAK'd */
1577 return 0;
1578
1579 if (chan->ep_is_in) {
1580 /*
1581 * Always queue another request for other IN transfers. If
1582 * back-to-back INs are issued and NAKs are received for both,
1583 * the driver may still be processing the first NAK when the
1584 * second NAK is received. When the interrupt handler clears
1585 * the NAK interrupt for the first NAK, the second NAK will
1586 * not be seen. So we can't depend on the NAK interrupt
1587 * handler to requeue a NAK'd request. Instead, IN requests
1588 * are issued each time this function is called. When the
1589 * transfer completes, the extra requests for the channel will
1590 * be flushed.
1591 */
1592 u32 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1593
1594 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1595 hcchar |= HCCHAR_CHENA;
1596 hcchar &= ~HCCHAR_CHDIS;
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001597 if (dbg_hc(chan))
1598 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
1599 hcchar);
1600 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1601 chan->requests++;
1602 return 1;
1603 }
1604
1605 /* OUT transfers */
1606
1607 if (chan->xfer_count < chan->xfer_len) {
1608 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1609 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1610 u32 hcchar = readl(hsotg->regs +
1611 HCCHAR(chan->hc_num));
1612
1613 dwc2_hc_set_even_odd_frame(hsotg, chan,
1614 &hcchar);
1615 }
1616
1617 /* Load OUT packet into the appropriate Tx FIFO */
1618 dwc2_hc_write_packet(hsotg, chan);
1619 chan->requests++;
1620 return 1;
1621 }
1622
1623 return 0;
1624}
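/*
 * Editor's note (illustrative, not from the original source): the return
 * value tells a Slave-mode caller whether this channel still needs requests.
 * A hypothetical requeue loop would therefore look like
 *
 *	while (dwc2_hc_continue_transfer(hsotg, chan))
 *		;
 *
 * although the real HCD issues the next request from its interrupt and
 * queue handling rather than from a busy loop.
 */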
1625
1626/**
1627 * dwc2_hc_do_ping() - Starts a PING transfer
1628 *
1629 * @hsotg: Programming view of DWC_otg controller
1630 * @chan: Information needed to initialize the host channel
1631 *
1632 * This function should only be called in Slave mode. The Do Ping bit is set in
1633 * the HCTSIZ register, then the channel is enabled.
1634 */
1635void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1636{
1637 u32 hcchar;
1638 u32 hctsiz;
1639
1640 if (dbg_hc(chan))
1641 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1642 chan->hc_num);
1643
1644
1645 hctsiz = TSIZ_DOPNG;
1646 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1647 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1648
1649 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1650 hcchar |= HCCHAR_CHENA;
1651 hcchar &= ~HCCHAR_CHDIS;
1652 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1653}
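/*
 * Editor's note (hedged): PING is a high-speed protocol used for control and
 * bulk OUT endpoints after a NYET or NAK, so a Slave-mode caller would
 * normally gate it on the channel state, for example
 *
 *	if (chan->do_ping)
 *		dwc2_hc_do_ping(hsotg, chan);
 *
 * where do_ping is assumed to record that the previous OUT handshake asked
 * the host to ping before sending more data.
 */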
1654
1655/**
1656 * dwc2_calc_frame_interval() - Calculates the correct frame interval value for
1657 * the HFIR register according to PHY type and speed
1658 *
1659 * @hsotg: Programming view of DWC_otg controller
1660 *
1661 * NOTE: The caller can modify the value of the HFIR register only after the
1662 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
1663 * has been set
1664 */
1665u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
1666{
1667 u32 usbcfg;
1668 u32 hwcfg2;
1669 u32 hprt0;
1670 int clock = 60; /* default value */
1671
1672 usbcfg = readl(hsotg->regs + GUSBCFG);
1673 hwcfg2 = readl(hsotg->regs + GHWCFG2);
1674 hprt0 = readl(hsotg->regs + HPRT0);
1675
1676 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
1677 !(usbcfg & GUSBCFG_PHYIF16))
1678 clock = 60;
1679 if ((usbcfg & GUSBCFG_PHYSEL) && (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) ==
1680 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
1681 clock = 48;
1682 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1683 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1684 clock = 30;
1685 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1686 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
1687 clock = 60;
1688 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1689 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1690 clock = 48;
1691 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
1692 (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) ==
1693 GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
1694 clock = 48;
1695 if ((usbcfg & GUSBCFG_PHYSEL) && (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) ==
1696 GHWCFG2_FS_PHY_TYPE_DEDICATED)
1697 clock = 48;
1698
1699 if ((hprt0 & HPRT0_SPD_MASK) == HPRT0_SPD_HIGH_SPEED)
1700 /* High speed case */
1701 return 125 * clock;
1702 else
1703 /* FS/LS case */
1704 return 1000 * clock;
1705}
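/*
 * Worked example (editor's addition): with a 60 MHz UTMI+ PHY clock this
 * returns 125 * 60 = 7500 PHY clocks for one 125 us high-speed microframe,
 * or 1000 * 60 = 60000 clocks for one 1 ms FS/LS frame; a 48 MHz FS PHY
 * gives 48000 clocks per 1 ms frame.
 */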
1706
1707/**
1708 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
1709 * buffer
1710 *
1711 * @hsotg: Programming view of DWC_otg controller
1712 * @dest: Destination buffer for the packet
1713 * @bytes: Number of bytes to copy to the destination
1714 */
1715void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
1716{
1717 u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
1718 u32 *data_buf = (u32 *)dest;
1719 int word_count = (bytes + 3) / 4;
1720 int i;
1721
1722 /*
1723 * Todo: Account for the case where dest is not dword aligned. This
1724 * requires reading data from the FIFO into a u32 temp buffer, then
1725 * moving it into the data buffer.
1726 */
1727
1728 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
1729
1730 for (i = 0; i < word_count; i++, data_buf++)
1731 *data_buf = readl(fifo);
1732}
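/*
 * Editor's note: the copy is done in whole 32-bit words, so asking for, say,
 * 13 bytes reads (13 + 3) / 4 = 4 words and stores 16 bytes at @dest. Until
 * the alignment todo above is handled, callers are assumed to provide a
 * 32-bit-aligned buffer padded to a multiple of 4 bytes, e.g.
 *
 *	u8 buf[64] __aligned(4);
 *
 *	dwc2_read_packet(hsotg, buf, 13);
 */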
1733
1734/**
1735 * dwc2_dump_host_registers() - Prints the host registers
1736 *
1737 * @hsotg: Programming view of DWC_otg controller
1738 *
1739 * NOTE: This function will be removed once the peripheral controller code
1740 * is integrated and the driver is stable
1741 */
1742void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
1743{
1744#ifdef DEBUG
1745 u32 __iomem *addr;
1746 int i;
1747
1748 dev_dbg(hsotg->dev, "Host Global Registers\n");
1749 addr = hsotg->regs + HCFG;
1750 dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
1751 (unsigned long)addr, readl(addr));
1752 addr = hsotg->regs + HFIR;
1753 dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
1754 (unsigned long)addr, readl(addr));
1755 addr = hsotg->regs + HFNUM;
1756 dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
1757 (unsigned long)addr, readl(addr));
1758 addr = hsotg->regs + HPTXSTS;
1759 dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
1760 (unsigned long)addr, readl(addr));
1761 addr = hsotg->regs + HAINT;
1762 dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
1763 (unsigned long)addr, readl(addr));
1764 addr = hsotg->regs + HAINTMSK;
1765 dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
1766 (unsigned long)addr, readl(addr));
1767 if (hsotg->core_params->dma_desc_enable > 0) {
1768 addr = hsotg->regs + HFLBADDR;
1769 dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
1770 (unsigned long)addr, readl(addr));
1771 }
1772
1773 addr = hsotg->regs + HPRT0;
1774 dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
1775 (unsigned long)addr, readl(addr));
1776
1777 for (i = 0; i < hsotg->core_params->host_channels; i++) {
1778 dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
1779 addr = hsotg->regs + HCCHAR(i);
1780 dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
1781 (unsigned long)addr, readl(addr));
1782 addr = hsotg->regs + HCSPLT(i);
1783 dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
1784 (unsigned long)addr, readl(addr));
1785 addr = hsotg->regs + HCINT(i);
1786 dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
1787 (unsigned long)addr, readl(addr));
1788 addr = hsotg->regs + HCINTMSK(i);
1789 dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
1790 (unsigned long)addr, readl(addr));
1791 addr = hsotg->regs + HCTSIZ(i);
1792 dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
1793 (unsigned long)addr, readl(addr));
1794 addr = hsotg->regs + HCDMA(i);
1795 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
1796 (unsigned long)addr, readl(addr));
1797 if (hsotg->core_params->dma_desc_enable > 0) {
1798 addr = hsotg->regs + HCDMAB(i);
1799 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
1800 (unsigned long)addr, readl(addr));
1801 }
1802 }
1803#endif
1804}
1805
1806/**
1807 * dwc2_dump_global_registers() - Prints the core global registers
1808 *
1809 * @hsotg: Programming view of DWC_otg controller
1810 *
1811 * NOTE: This function will be removed once the peripheral controller code
1812 * is integrated and the driver is stable
1813 */
1814void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
1815{
1816#ifdef DEBUG
1817 u32 __iomem *addr;
1818 int i, ep_num;
1819 char *txfsiz;
1820
1821 dev_dbg(hsotg->dev, "Core Global Registers\n");
1822 addr = hsotg->regs + GOTGCTL;
1823 dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
1824 (unsigned long)addr, readl(addr));
1825 addr = hsotg->regs + GOTGINT;
1826 dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
1827 (unsigned long)addr, readl(addr));
1828 addr = hsotg->regs + GAHBCFG;
1829 dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
1830 (unsigned long)addr, readl(addr));
1831 addr = hsotg->regs + GUSBCFG;
1832 dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
1833 (unsigned long)addr, readl(addr));
1834 addr = hsotg->regs + GRSTCTL;
1835 dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
1836 (unsigned long)addr, readl(addr));
1837 addr = hsotg->regs + GINTSTS;
1838 dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
1839 (unsigned long)addr, readl(addr));
1840 addr = hsotg->regs + GINTMSK;
1841 dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
1842 (unsigned long)addr, readl(addr));
1843 addr = hsotg->regs + GRXSTSR;
1844 dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
1845 (unsigned long)addr, readl(addr));
1846 addr = hsotg->regs + GRXFSIZ;
1847 dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
1848 (unsigned long)addr, readl(addr));
1849 addr = hsotg->regs + GNPTXFSIZ;
1850 dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
1851 (unsigned long)addr, readl(addr));
1852 addr = hsotg->regs + GNPTXSTS;
1853 dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
1854 (unsigned long)addr, readl(addr));
1855 addr = hsotg->regs + GI2CCTL;
1856 dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
1857 (unsigned long)addr, readl(addr));
1858 addr = hsotg->regs + GPVNDCTL;
1859 dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
1860 (unsigned long)addr, readl(addr));
1861 addr = hsotg->regs + GGPIO;
1862 dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
1863 (unsigned long)addr, readl(addr));
1864 addr = hsotg->regs + GUID;
1865 dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
1866 (unsigned long)addr, readl(addr));
1867 addr = hsotg->regs + GSNPSID;
1868 dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
1869 (unsigned long)addr, readl(addr));
1870 addr = hsotg->regs + GHWCFG1;
1871 dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
1872 (unsigned long)addr, readl(addr));
1873 addr = hsotg->regs + GHWCFG2;
1874 dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
1875 (unsigned long)addr, readl(addr));
1876 addr = hsotg->regs + GHWCFG3;
1877 dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
1878 (unsigned long)addr, readl(addr));
1879 addr = hsotg->regs + GHWCFG4;
1880 dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
1881 (unsigned long)addr, readl(addr));
1882 addr = hsotg->regs + GLPMCFG;
1883 dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
1884 (unsigned long)addr, readl(addr));
1885 addr = hsotg->regs + GPWRDN;
1886 dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
1887 (unsigned long)addr, readl(addr));
1888 addr = hsotg->regs + GDFIFOCFG;
1889 dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
1890 (unsigned long)addr, readl(addr));
1891 addr = hsotg->regs + HPTXFSIZ;
1892 dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
1893 (unsigned long)addr, readl(addr));
1894
1895 if (hsotg->core_params->en_multiple_tx_fifo <= 0) {
1896 ep_num = hsotg->hwcfg4 >> GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT &
1897 GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK >>
1898 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
1899 txfsiz = "DPTXFSIZ";
1900 } else {
1901 ep_num = hsotg->hwcfg4 >> GHWCFG4_NUM_IN_EPS_SHIFT &
1902 GHWCFG4_NUM_IN_EPS_MASK >> GHWCFG4_NUM_IN_EPS_SHIFT;
1903 txfsiz = "DIENPTXF";
1904 }
1905
1906 for (i = 0; i < ep_num; i++) {
1907 addr = hsotg->regs + DPTXFSIZN(i + 1);
1908 dev_dbg(hsotg->dev, "%s[%d] @0x%08lX : 0x%08X\n", txfsiz, i + 1,
1909 (unsigned long)addr, readl(addr));
1910 }
1911
1912 addr = hsotg->regs + PCGCTL;
1913 dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
1914 (unsigned long)addr, readl(addr));
1915#endif
1916}
1917
1918/**
1919 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
1920 *
1921 * @hsotg: Programming view of DWC_otg controller
1922 * @num: Tx FIFO to flush
1923 */
1924void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
1925{
1926 u32 greset;
1927 int count = 0;
1928
1929 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
1930
1931 greset = GRSTCTL_TXFFLSH;
1932 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
1933 writel(greset, hsotg->regs + GRSTCTL);
1934
1935 do {
1936 greset = readl(hsotg->regs + GRSTCTL);
1937 if (++count > 10000) {
1938 dev_warn(hsotg->dev,
1939 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
1940 __func__, greset,
1941 readl(hsotg->regs + GNPTXSTS));
1942 break;
1943 }
1944 udelay(1);
1945 } while (greset & GRSTCTL_TXFFLSH);
1946
1947 /* Wait for at least 3 PHY Clocks */
1948 udelay(1);
1949}
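/*
 * Usage sketch (editor's addition, hedged): the GRSTCTL TxFNum encoding lets
 * a caller flush a single FIFO or all of them, e.g.
 *
 *	dwc2_flush_tx_fifo(hsotg, 0);      non-periodic Tx FIFO only
 *	dwc2_flush_tx_fifo(hsotg, 0x10);   all Tx FIFOs on this core
 *
 * The 0x10 "flush all" value comes from the TxFNum field description and is
 * stated here as an assumption, not something this file defines.
 */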
1950
1951/**
1952 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
1953 *
1954 * @hsotg: Programming view of DWC_otg controller
1955 */
1956void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
1957{
1958 u32 greset;
1959 int count = 0;
1960
1961 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1962
1963 greset = GRSTCTL_RXFFLSH;
1964 writel(greset, hsotg->regs + GRSTCTL);
1965
1966 do {
1967 greset = readl(hsotg->regs + GRSTCTL);
1968 if (++count > 10000) {
1969 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
1970 __func__, greset);
1971 break;
1972 }
1973 udelay(1);
1974 } while (greset & GRSTCTL_RXFFLSH);
1975
1976 /* Wait for at least 3 PHY Clocks */
1977 udelay(1);
1978}
1979
1980#define DWC2_PARAM_TEST(a, b, c) ((a) < (b) || (a) > (c))
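/*
 * DWC2_PARAM_TEST(a, b, c) is a simple range check: it is true when 'a'
 * falls outside the inclusive range [b, c]. For example,
 * DWC2_PARAM_TEST(val, 0, 1) rejects everything except 0 and 1; negative
 * values also trip it, which the setters below treat as "pick the default"
 * and therefore skip the error message (the "if (val >= 0)" guards).
 */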
1981
1982/* Parameter access functions */
1983int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
1984{
1985 int valid = 1;
1986 int retval = 0;
1987 u32 op_mode;
1988
1989 op_mode = hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK;
1990
1991 switch (val) {
1992 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
1993 if (op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
1994 valid = 0;
1995 break;
1996 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
1997 switch (op_mode) {
1998 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1999 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2000 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2001 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2002 break;
2003 default:
2004 valid = 0;
2005 break;
2006 }
2007 break;
2008 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
2009 /* always valid */
2010 break;
2011 default:
2012 valid = 0;
2013 break;
2014 }
2015
2016 if (!valid) {
2017 if (val >= 0)
2018 dev_err(hsotg->dev,
2019 "%d invalid for otg_cap parameter. Check HW configuration.\n",
2020 val);
2021 switch (op_mode) {
2022 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2023 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
2024 break;
2025 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2026 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2027 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2028 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2029 break;
2030 default:
2031 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2032 break;
2033 }
2034 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
2035 retval = -EINVAL;
2036 }
2037
2038 hsotg->core_params->otg_cap = val;
2039 return retval;
2040}
2041
2042int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2043{
2044 int valid = 1;
2045 int retval = 0;
2046
2047 if (val > 0 && (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) ==
2048 GHWCFG2_SLAVE_ONLY_ARCH)
2049 valid = 0;
2050 if (val < 0)
2051 valid = 0;
2052
2053 if (!valid) {
2054 if (val >= 0)
2055 dev_err(hsotg->dev,
2056 "%d invalid for dma_enable parameter. Check HW configuration.\n",
2057 val);
2058 val = (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) !=
2059 GHWCFG2_SLAVE_ONLY_ARCH;
2060 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2061 retval = -EINVAL;
2062 }
2063
2064 hsotg->core_params->dma_enable = val;
2065 return retval;
2066}
2067
2068int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2069{
2070 int valid = 1;
2071 int retval = 0;
2072
2073 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2074 !(hsotg->hwcfg4 & GHWCFG4_DESC_DMA)))
2075 valid = 0;
2076 if (val < 0)
2077 valid = 0;
2078
2079 if (!valid) {
2080 if (val >= 0)
2081 dev_err(hsotg->dev,
2082 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2083 val);
2084 val = (hsotg->core_params->dma_enable > 0 &&
2085 (hsotg->hwcfg4 & GHWCFG4_DESC_DMA));
2086 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2087 retval = -EINVAL;
2088 }
2089
2090 hsotg->core_params->dma_desc_enable = val;
2091 return retval;
2092}
2093
2094int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2095 int val)
2096{
2097 int retval = 0;
2098
2099 if (DWC2_PARAM_TEST(val, 0, 1)) {
2100 if (val >= 0) {
2101 dev_err(hsotg->dev,
2102 "Wrong value for host_support_fs_low_power\n");
2103 dev_err(hsotg->dev,
2104 "host_support_fs_low_power must be 0 or 1\n");
2105 }
2106 val = 0;
2107 dev_dbg(hsotg->dev,
2108 "Setting host_support_fs_low_power to %d\n", val);
2109 retval = -EINVAL;
2110 }
2111
2112 hsotg->core_params->host_support_fs_ls_low_power = val;
2113 return retval;
2114}
2115
2116int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2117{
2118 int valid = 1;
2119 int retval = 0;
2120
2121 if (val > 0 && !(hsotg->hwcfg2 & GHWCFG2_DYNAMIC_FIFO))
2122 valid = 0;
2123 if (val < 0)
2124 valid = 0;
2125
2126 if (!valid) {
2127 if (val >= 0)
2128 dev_err(hsotg->dev,
2129 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2130 val);
2131 val = !!(hsotg->hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
2132 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2133 retval = -EINVAL;
2134 }
2135
2136 hsotg->core_params->enable_dynamic_fifo = val;
2137 return retval;
2138}
2139
2140int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2141{
2142 int valid = 1;
2143 int retval = 0;
2144
2145 if (val < 16 || val > readl(hsotg->regs + GRXFSIZ))
2146 valid = 0;
2147
2148 if (!valid) {
2149 if (val >= 0)
2150 dev_err(hsotg->dev,
2151 "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2152 val);
2153 val = readl(hsotg->regs + GRXFSIZ);
2154 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2155 retval = -EINVAL;
2156 }
2157
2158 hsotg->core_params->host_rx_fifo_size = val;
2159 return retval;
2160}
2161
2162int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2163{
2164 int valid = 1;
2165 int retval = 0;
2166
2167 if (val < 16 || val > (readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff))
2168 valid = 0;
2169
2170 if (!valid) {
2171 if (val >= 0)
2172 dev_err(hsotg->dev,
2173 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2174 val);
2175 val = readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
2176 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2177 val);
2178 retval = -EINVAL;
2179 }
2180
2181 hsotg->core_params->host_nperio_tx_fifo_size = val;
2182 return retval;
2183}
2184
2185int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2186{
2187 int valid = 1;
2188 int retval = 0;
2189
2190 if (val < 16 || val > (hsotg->hptxfsiz >> 16))
2191 valid = 0;
2192
2193 if (!valid) {
2194 if (val >= 0)
2195 dev_err(hsotg->dev,
2196 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2197 val);
2198 val = hsotg->hptxfsiz >> 16;
2199 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2200 val);
2201 retval = -EINVAL;
2202 }
2203
2204 hsotg->core_params->host_perio_tx_fifo_size = val;
2205 return retval;
2206}
2207
2208int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2209{
2210 int valid = 1;
2211 int retval = 0;
2212 int width = hsotg->hwcfg3 >> GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT &
2213 GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK >>
2214 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
2215
2216 if (val < 2047 || val >= (1 << (width + 11)))
2217 valid = 0;
2218
2219 if (!valid) {
2220 if (val >= 0)
2221 dev_err(hsotg->dev,
2222 "%d invalid for max_transfer_size. Check HW configuration.\n",
2223 val);
2224 val = (1 << (width + 11)) - 1;
2225 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2226 retval = -EINVAL;
2227 }
2228
2229 hsotg->core_params->max_transfer_size = val;
2230 return retval;
2231}
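/*
 * Worked example (editor's addition): the range check above implies the
 * transfer size counter is (width + 11) bits wide, so a GHWCFG3 field value
 * of 8 gives a 19-bit counter and a fallback maximum of
 * (1 << 19) - 1 = 524287 bytes.
 */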
2232
2233int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2234{
2235 int valid = 1;
2236 int retval = 0;
2237 int width = hsotg->hwcfg3 >> GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT &
2238 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK >>
2239 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
2240
2241 if (val < 15 || val > (1 << (width + 4)))
2242 valid = 0;
2243
2244 if (!valid) {
2245 if (val >= 0)
2246 dev_err(hsotg->dev,
2247 "%d invalid for max_packet_count. Check HW configuration.\n",
2248 val);
2249 val = (1 << (width + 4)) - 1;
2250 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2251 retval = -EINVAL;
2252 }
2253
2254 hsotg->core_params->max_packet_count = val;
2255 return retval;
2256}
2257
2258int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2259{
2260 int valid = 1;
2261 int retval = 0;
2262 int num_chan = hsotg->hwcfg2 >> GHWCFG2_NUM_HOST_CHAN_SHIFT &
2263 GHWCFG2_NUM_HOST_CHAN_MASK >> GHWCFG2_NUM_HOST_CHAN_SHIFT;
2264
2265 if (val < 1 || val > num_chan + 1)
2266 valid = 0;
2267
2268 if (!valid) {
2269 if (val >= 0)
2270 dev_err(hsotg->dev,
2271 "%d invalid for host_channels. Check HW configuration.\n",
2272 val);
2273 val = num_chan + 1;
2274 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2275 retval = -EINVAL;
2276 }
2277
2278 hsotg->core_params->host_channels = val;
2279 return retval;
2280}
2281
2282int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
2283{
2284#ifndef NO_FS_PHY_HW_CHECKS
2285 int valid = 0;
2286 u32 hs_phy_type;
2287 u32 fs_phy_type;
2288#endif
2289 int retval = 0;
2290
2291 if (DWC2_PARAM_TEST(val, DWC2_PHY_TYPE_PARAM_FS,
2292 DWC2_PHY_TYPE_PARAM_ULPI)) {
2293 if (val >= 0) {
2294 dev_err(hsotg->dev, "Wrong value for phy_type\n");
2295 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2296 }
2297
2298#ifndef NO_FS_PHY_HW_CHECKS
2299 valid = 0;
2300#else
2301 val = DWC2_PHY_TYPE_PARAM_FS;
2302 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2303 retval = -EINVAL;
2304#endif
2305 }
2306
2307#ifndef NO_FS_PHY_HW_CHECKS
2308 hs_phy_type = hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK;
2309 fs_phy_type = hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK;
2310
2311 if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2312 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2313 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2314 valid = 1;
2315 else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2316 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2317 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2318 valid = 1;
2319 else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2320 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2321 valid = 1;
2322
2323 if (!valid) {
2324 if (val >= 0)
2325 dev_err(hsotg->dev,
2326 "%d invalid for phy_type. Check HW configuration.\n",
2327 val);
2328 val = DWC2_PHY_TYPE_PARAM_FS;
2329 if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2330 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2331 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2332 val = DWC2_PHY_TYPE_PARAM_UTMI;
2333 else
2334 val = DWC2_PHY_TYPE_PARAM_ULPI;
2335 }
2336 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2337 retval = -EINVAL;
2338 }
2339#endif
2340
2341 hsotg->core_params->phy_type = val;
2342 return retval;
2343}
2344
2345static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2346{
2347 return hsotg->core_params->phy_type;
2348}
2349
2350int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2351{
2352 int valid = 1;
2353 int retval = 0;
2354
2355 if (DWC2_PARAM_TEST(val, 0, 1)) {
2356 if (val >= 0) {
2357 dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2358 dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
2359 }
2360 valid = 0;
2361 }
2362
2363 if (val == DWC2_SPEED_PARAM_HIGH &&
2364 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2365 valid = 0;
2366
2367 if (!valid) {
2368 if (val >= 0)
2369 dev_err(hsotg->dev,
2370 "%d invalid for speed parameter. Check HW configuration.\n",
2371 val);
2372 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
2373 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
2374 dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2375 retval = -EINVAL;
2376 }
2377
2378 hsotg->core_params->speed = val;
2379 return retval;
2380}
2381
2382int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2383{
2384 int valid = 1;
2385 int retval = 0;
2386
2387 if (DWC2_PARAM_TEST(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2388 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2389 if (val >= 0) {
2390 dev_err(hsotg->dev,
2391 "Wrong value for host_ls_low_power_phy_clk parameter\n");
2392 dev_err(hsotg->dev,
2393 "host_ls_low_power_phy_clk must be 0 or 1\n");
2394 }
2395 valid = 0;
2396 }
2397
2398 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2399 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2400 valid = 0;
2401
2402 if (!valid) {
2403 if (val >= 0)
2404 dev_err(hsotg->dev,
2405 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2406 val);
2407 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2408 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2409 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2410 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2411 val);
2412 retval = -EINVAL;
2413 }
2414
2415 hsotg->core_params->host_ls_low_power_phy_clk = val;
2416 return retval;
2417}
2418
2419int dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2420{
2421 int retval = 0;
2422
2423 if (DWC2_PARAM_TEST(val, 0, 1)) {
2424 if (val >= 0) {
2425 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2426 dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
2427 }
2428 val = 0;
2429 dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
2430 retval = -EINVAL;
2431 }
2432
2433 hsotg->core_params->phy_ulpi_ddr = val;
2434 return retval;
2435}
2436
2437int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2438{
2439 int retval = 0;
2440
2441 if (DWC2_PARAM_TEST(val, 0, 1)) {
2442 if (val >= 0) {
2443 dev_err(hsotg->dev,
2444 "Wrong value for phy_ulpi_ext_vbus\n");
2445 dev_err(hsotg->dev,
2446 "phy_ulpi_ext_vbus must be 0 or 1\n");
2447 }
2448 val = 0;
2449 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2450 retval = -EINVAL;
2451 }
2452
2453 hsotg->core_params->phy_ulpi_ext_vbus = val;
2454 return retval;
2455}
2456
2457int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2458{
2459 int retval = 0;
2460
2461 if (DWC2_PARAM_TEST(val, 8, 8) && DWC2_PARAM_TEST(val, 16, 16)) {
2462 if (val >= 0) {
2463 dev_err(hsotg->dev, "Wrong value for phy_utmi_width\n");
2464 dev_err(hsotg->dev, "phy_utmi_width must be 8 or 16\n");
2465 }
2466 val = 8;
2467 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2468 retval = -EINVAL;
2469 }
2470
2471 hsotg->core_params->phy_utmi_width = val;
2472 return retval;
2473}
2474
2475int dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2476{
2477 int retval = 0;
2478
2479 if (DWC2_PARAM_TEST(val, 0, 1)) {
2480 if (val >= 0) {
2481 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2482 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2483 }
2484 val = 0;
2485 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2486 retval = -EINVAL;
2487 }
2488
2489 hsotg->core_params->ulpi_fs_ls = val;
2490 return retval;
2491}
2492
2493int dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2494{
2495 int retval = 0;
2496
2497 if (DWC2_PARAM_TEST(val, 0, 1)) {
2498 if (val >= 0) {
2499 dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2500 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2501 }
2502 val = 0;
2503 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2504 retval = -EINVAL;
2505 }
2506
2507 hsotg->core_params->ts_dline = val;
2508 return retval;
2509}
2510
2511int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
2512{
2513#ifndef NO_FS_PHY_HW_CHECKS
2514 int valid = 1;
2515#endif
2516 int retval = 0;
2517
2518 if (DWC2_PARAM_TEST(val, 0, 1)) {
2519 if (val >= 0) {
2520 dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2521 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2522 }
2523
2524#ifndef NO_FS_PHY_HW_CHECKS
2525 valid = 0;
2526#else
2527 val = 0;
2528 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2529 retval = -EINVAL;
2530#endif
2531 }
2532
2533#ifndef NO_FS_PHY_HW_CHECKS
2534 if (val == 1 && !(hsotg->hwcfg3 & GHWCFG3_I2C))
2535 valid = 0;
2536
2537 if (!valid) {
2538 if (val >= 0)
2539 dev_err(hsotg->dev,
2540 "%d invalid for i2c_enable. Check HW configuration.\n",
2541 val);
2542 val = !!(hsotg->hwcfg3 & GHWCFG3_I2C);
2543 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2544 retval = -EINVAL;
2545 }
2546#endif
2547
2548 hsotg->core_params->i2c_enable = val;
2549 return retval;
2550}
2551
2552int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2553{
2554 int valid = 1;
2555 int retval = 0;
2556
2557 if (DWC2_PARAM_TEST(val, 0, 1)) {
2558 if (val >= 0) {
2559 dev_err(hsotg->dev,
2560 "Wrong value for en_multiple_tx_fifo,\n");
2561 dev_err(hsotg->dev,
2562 "en_multiple_tx_fifo must be 0 or 1\n");
2563 }
2564 valid = 0;
2565 }
2566
2567 if (val == 1 && !(hsotg->hwcfg4 & GHWCFG4_DED_FIFO_EN))
2568 valid = 0;
2569
2570 if (!valid) {
2571 if (val >= 0)
2572 dev_err(hsotg->dev,
2573 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2574 val);
2575 val = !!(hsotg->hwcfg4 & GHWCFG4_DED_FIFO_EN);
2576 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2577 retval = -EINVAL;
2578 }
2579
2580 hsotg->core_params->en_multiple_tx_fifo = val;
2581 return retval;
2582}
2583
2584int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2585{
2586 int valid = 1;
2587 int retval = 0;
2588
2589 if (DWC2_PARAM_TEST(val, 0, 1)) {
2590 if (val >= 0) {
2591 dev_err(hsotg->dev,
2592 "'%d' invalid for parameter reload_ctl\n", val);
2593 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2594 }
2595 valid = 0;
2596 }
2597
2598 if (val == 1 && hsotg->snpsid < DWC2_CORE_REV_2_92a)
2599 valid = 0;
2600
2601 if (!valid) {
2602 if (val >= 0)
2603 dev_err(hsotg->dev,
2604 "%d invalid for parameter reload_ctl. Check HW configuration.\n",
2605 val);
2606 val = hsotg->snpsid >= DWC2_CORE_REV_2_92a;
2607 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2608 retval = -EINVAL;
2609 }
2610
2611 hsotg->core_params->reload_ctl = val;
2612 return retval;
2613}
2614
2615int dwc2_set_param_ahb_single(struct dwc2_hsotg *hsotg, int val)
2616{
2617 int valid = 1;
2618 int retval = 0;
2619
2620 if (DWC2_PARAM_TEST(val, 0, 1)) {
2621 if (val >= 0) {
2622 dev_err(hsotg->dev,
2623 "'%d' invalid for parameter ahb_single\n", val);
2624 dev_err(hsotg->dev, "ahb_single must be 0 or 1\n");
2625 }
2626 valid = 0;
2627 }
2628
2629 if (val > 0 && hsotg->snpsid < DWC2_CORE_REV_2_94a)
2630 valid = 0;
2631
2632 if (!valid) {
2633 if (val >= 0)
2634 dev_err(hsotg->dev,
2635 "%d invalid for parameter ahb_single. Check HW configuration.\n",
2636 val);
2637 val = 0;
2638 dev_dbg(hsotg->dev, "Setting ahb_single to %d\n", val);
2639 retval = -EINVAL;
2640 }
2641
2642 hsotg->core_params->ahb_single = val;
2643 return retval;
2644}
2645
2646int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2647{
2648 int retval = 0;
2649
2650 if (DWC2_PARAM_TEST(val, 0, 1)) {
2651 if (val >= 0) {
2652 dev_err(hsotg->dev,
2653 "'%d' invalid for parameter otg_ver\n", val);
2654 dev_err(hsotg->dev,
2655 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2656 }
2657 val = 0;
2658 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2659 retval = -EINVAL;
2660 }
2661
2662 hsotg->core_params->otg_ver = val;
2663 return retval;
2664}
2665
2666/*
2667 * This function is called during module initialization to pass module parameters
2668 * for the DWC_otg core. It returns non-0 if any parameters are invalid.
2669 */
2670int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
2671 struct dwc2_core_params *params)
2672{
2673 int retval = 0;
2674
2675 dev_dbg(hsotg->dev, "%s()\n", __func__);
2676
2677 retval |= dwc2_set_param_otg_cap(hsotg, params->otg_cap);
2678 retval |= dwc2_set_param_dma_enable(hsotg, params->dma_enable);
2679 retval |= dwc2_set_param_dma_desc_enable(hsotg,
2680 params->dma_desc_enable);
2681 retval |= dwc2_set_param_host_support_fs_ls_low_power(hsotg,
2682 params->host_support_fs_ls_low_power);
2683 retval |= dwc2_set_param_enable_dynamic_fifo(hsotg,
2684 params->enable_dynamic_fifo);
2685 retval |= dwc2_set_param_host_rx_fifo_size(hsotg,
2686 params->host_rx_fifo_size);
2687 retval |= dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
2688 params->host_nperio_tx_fifo_size);
2689 retval |= dwc2_set_param_host_perio_tx_fifo_size(hsotg,
2690 params->host_perio_tx_fifo_size);
2691 retval |= dwc2_set_param_max_transfer_size(hsotg,
2692 params->max_transfer_size);
2693 retval |= dwc2_set_param_max_packet_count(hsotg,
2694 params->max_packet_count);
2695 retval |= dwc2_set_param_host_channels(hsotg, params->host_channels);
2696 retval |= dwc2_set_param_phy_type(hsotg, params->phy_type);
2697 retval |= dwc2_set_param_speed(hsotg, params->speed);
2698 retval |= dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
2699 params->host_ls_low_power_phy_clk);
2700 retval |= dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
2701 retval |= dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
2702 params->phy_ulpi_ext_vbus);
2703 retval |= dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
2704 retval |= dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
2705 retval |= dwc2_set_param_ts_dline(hsotg, params->ts_dline);
2706 retval |= dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
2707 retval |= dwc2_set_param_en_multiple_tx_fifo(hsotg,
2708 params->en_multiple_tx_fifo);
2709 retval |= dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
2710 retval |= dwc2_set_param_ahb_single(hsotg, params->ahb_single);
2711 retval |= dwc2_set_param_otg_ver(hsotg, params->otg_ver);
2712
2713 return retval;
2714}
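/*
 * Illustrative sketch (editor's addition; names and values are hypothetical):
 * a platform glue driver could fill a dwc2_core_params structure and pass it
 * here, using -1 for any field it wants chosen from the hardware
 * configuration registers:
 *
 *	static struct dwc2_core_params example_params = {
 *		.otg_cap		= DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
 *		.dma_enable		= -1,
 *		.dma_desc_enable	= -1,
 *		.speed			= -1,
 *		.host_channels		= -1,
 *		(remaining fields left at -1 as well)
 *	};
 *
 *	retval = dwc2_set_parameters(hsotg, &example_params);
 */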
2715
2716u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
2717{
2718 return (u16)(hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103);
2719}
2720
2721int dwc2_check_core_status(struct dwc2_hsotg *hsotg)
2722{
2723 if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
2724 return -1;
2725 else
2726 return 0;
2727}
2728
2729/**
2730 * dwc2_enable_global_interrupts() - Enables the controller's Global
2731 * Interrupt in the AHB Config register
2732 *
2733 * @hsotg: Programming view of DWC_otg controller
2734 */
2735void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
2736{
2737 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2738
2739 ahbcfg |= GAHBCFG_GLBL_INTR_EN;
2740 writel(ahbcfg, hsotg->regs + GAHBCFG);
2741}
2742
2743/**
2744 * dwc2_disable_global_interrupts() - Disables the controller's Global
2745 * Interrupt in the AHB Config register
2746 *
2747 * @hsotg: Programming view of DWC_otg controller
2748 */
2749void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
2750{
2751 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2752
2753 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
2754 writel(ahbcfg, hsotg->regs + GAHBCFG);
2755}
2756
2757MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
2758MODULE_AUTHOR("Synopsys, Inc.");
2759MODULE_LICENSE("Dual BSD/GPL");