blob: 9eabebbc16b8db182d1d75a8986d10792ec03995 [file] [log] [blame]
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001/*
2 * core.c - DesignWare HS OTG Controller common routines
3 *
4 * Copyright (C) 2004-2013 Synopsys, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The names of the above-listed copyright holders may not be used
16 * to endorse or promote products derived from this software without
17 * specific prior written permission.
18 *
19 * ALTERNATIVELY, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") as published by the Free Software
21 * Foundation; either version 2 of the License, or (at your option) any
22 * later version.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36
37/*
38 * The Core code provides basic services for accessing and managing the
39 * DWC_otg hardware. These services are used by both the Host Controller
40 * Driver and the Peripheral Controller Driver.
41 */
42#include <linux/kernel.h>
43#include <linux/module.h>
44#include <linux/moduleparam.h>
45#include <linux/spinlock.h>
46#include <linux/interrupt.h>
47#include <linux/dma-mapping.h>
48#include <linux/delay.h>
49#include <linux/io.h>
50#include <linux/slab.h>
51#include <linux/usb.h>
52
53#include <linux/usb/hcd.h>
54#include <linux/usb/ch11.h>
55
56#include "core.h"
57#include "hcd.h"
58
59/**
60 * dwc2_enable_common_interrupts() - Initializes the commmon interrupts,
61 * used in both device and host modes
62 *
63 * @hsotg: Programming view of the DWC_otg controller
64 */
65static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
66{
67 u32 intmsk;
68
69 /* Clear any pending OTG Interrupts */
70 writel(0xffffffff, hsotg->regs + GOTGINT);
71
72 /* Clear any pending interrupts */
73 writel(0xffffffff, hsotg->regs + GINTSTS);
74
75 /* Enable the interrupts in the GINTMSK */
76 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
77
78 if (hsotg->core_params->dma_enable <= 0)
79 intmsk |= GINTSTS_RXFLVL;
80
81 intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
82 GINTSTS_SESSREQINT;
83
84 writel(intmsk, hsotg->regs + GINTMSK);
85}
86
87/*
88 * Initializes the FSLSPClkSel field of the HCFG register depending on the
89 * PHY type
90 */
91static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
92{
Matthijs Kooijmanf9234632013-08-30 18:45:13 +020093 u32 hs_phy_type = (hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
94 GHWCFG2_HS_PHY_TYPE_SHIFT;
95 u32 fs_phy_type = (hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
96 GHWCFG2_FS_PHY_TYPE_SHIFT;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -070097 u32 hcfg, val;
98
99 if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
100 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
101 hsotg->core_params->ulpi_fs_ls > 0) ||
102 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
103 /* Full speed PHY */
104 val = HCFG_FSLSPCLKSEL_48_MHZ;
105 } else {
106 /* High speed PHY running at full speed or high speed */
107 val = HCFG_FSLSPCLKSEL_30_60_MHZ;
108 }
109
110 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
111 hcfg = readl(hsotg->regs + HCFG);
112 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200113 hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700114 writel(hcfg, hsotg->regs + HCFG);
115}
116
117/*
118 * Do core a soft reset of the core. Be careful with this because it
119 * resets all the internal state machines of the core.
120 */
121static void dwc2_core_reset(struct dwc2_hsotg *hsotg)
122{
123 u32 greset;
124 int count = 0;
125
126 dev_vdbg(hsotg->dev, "%s()\n", __func__);
127
128 /* Wait for AHB master IDLE state */
129 do {
130 usleep_range(20000, 40000);
131 greset = readl(hsotg->regs + GRSTCTL);
132 if (++count > 50) {
133 dev_warn(hsotg->dev,
134 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
135 __func__, greset);
136 return;
137 }
138 } while (!(greset & GRSTCTL_AHBIDLE));
139
140 /* Core Soft Reset */
141 count = 0;
142 greset |= GRSTCTL_CSFTRST;
143 writel(greset, hsotg->regs + GRSTCTL);
144 do {
145 usleep_range(20000, 40000);
146 greset = readl(hsotg->regs + GRSTCTL);
147 if (++count > 50) {
148 dev_warn(hsotg->dev,
149 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
150 __func__, greset);
151 break;
152 }
153 } while (greset & GRSTCTL_CSFTRST);
154
155 /*
156 * NOTE: This long sleep is _very_ important, otherwise the core will
157 * not stay in host mode after a connector ID change!
158 */
159 usleep_range(150000, 200000);
160}
161
/*
 * Select and configure the full-speed PHY. Optionally selects the FS PHY
 * interface (followed by the mandatory core reset) and, when the i2c_enable
 * core parameter is set, enables the FS PHY's I2C interface.
 */
static void dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, i2cctl;

	/*
	 * core_init() is now called on every switch so only call the
	 * following for the first time through
	 */
	if (select_phy) {
		dev_dbg(hsotg->dev, "FS PHY selected\n");
		usbcfg = readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_PHYSEL;
		writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Reset after a PHY select */
		dwc2_core_reset(hsotg);
	}

	/*
	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
	 * do this on HNP Dev/Host mode switches (done in dev_init and
	 * host_init).
	 */
	if (dwc2_is_host_mode(hsotg))
		dwc2_init_fs_ls_pclk_sel(hsotg);

	if (hsotg->core_params->i2c_enable > 0) {
		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");

		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
		usbcfg = readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
		writel(usbcfg, hsotg->regs + GUSBCFG);

		/*
		 * Program GI2CCTL.I2CEn: set the device address, then write
		 * the register once with I2CEn clear and once with it set --
		 * the enable bit is deliberately toggled in two writes
		 */
		i2cctl = readl(hsotg->regs + GI2CCTL);
		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
		i2cctl &= ~GI2CCTL_I2CEN;
		writel(i2cctl, hsotg->regs + GI2CCTL);
		i2cctl |= GI2CCTL_I2CEN;
		writel(i2cctl, hsotg->regs + GI2CCTL);
	}
}
206
207static void dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
208{
209 u32 usbcfg;
210
211 if (!select_phy)
212 return;
213
214 usbcfg = readl(hsotg->regs + GUSBCFG);
215
216 /*
217 * HS PHY parameters. These parameters are preserved during soft reset
218 * so only program the first time. Do a soft reset immediately after
219 * setting phyif.
220 */
221 switch (hsotg->core_params->phy_type) {
222 case DWC2_PHY_TYPE_PARAM_ULPI:
223 /* ULPI interface */
224 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
225 usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
226 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
227 if (hsotg->core_params->phy_ulpi_ddr > 0)
228 usbcfg |= GUSBCFG_DDRSEL;
229 break;
230 case DWC2_PHY_TYPE_PARAM_UTMI:
231 /* UTMI+ interface */
232 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
233 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
234 if (hsotg->core_params->phy_utmi_width == 16)
235 usbcfg |= GUSBCFG_PHYIF16;
236 break;
237 default:
238 dev_err(hsotg->dev, "FS PHY selected at HS!\n");
239 break;
240 }
241
242 writel(usbcfg, hsotg->regs + GUSBCFG);
243
244 /* Reset after setting the PHY parameters */
245 dwc2_core_reset(hsotg);
246}
247
248static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
249{
250 u32 usbcfg, hs_phy_type, fs_phy_type;
251
252 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
253 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
254 /* If FS mode with FS PHY */
255 dwc2_fs_phy_init(hsotg, select_phy);
256 } else {
257 /* High speed PHY */
258 dwc2_hs_phy_init(hsotg, select_phy);
259 }
260
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200261 hs_phy_type = (hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
262 GHWCFG2_HS_PHY_TYPE_SHIFT;
263 fs_phy_type = (hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
264 GHWCFG2_FS_PHY_TYPE_SHIFT;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700265
266 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
267 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
268 hsotg->core_params->ulpi_fs_ls > 0) {
269 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
270 usbcfg = readl(hsotg->regs + GUSBCFG);
271 usbcfg |= GUSBCFG_ULPI_FS_LS;
272 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
273 writel(usbcfg, hsotg->regs + GUSBCFG);
274 } else {
275 usbcfg = readl(hsotg->regs + GUSBCFG);
276 usbcfg &= ~GUSBCFG_ULPI_FS_LS;
277 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
278 writel(usbcfg, hsotg->regs + GUSBCFG);
279 }
280}
281
282static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
283{
Paul Zimmerman4d3190e2013-07-16 12:22:12 -0700284 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700285
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200286 switch ((hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
287 GHWCFG2_ARCHITECTURE_SHIFT) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700288 case GHWCFG2_EXT_DMA_ARCH:
289 dev_err(hsotg->dev, "External DMA Mode not supported\n");
290 return -EINVAL;
291
292 case GHWCFG2_INT_DMA_ARCH:
293 dev_dbg(hsotg->dev, "Internal DMA Mode\n");
Paul Zimmerman4d3190e2013-07-16 12:22:12 -0700294 if (hsotg->core_params->ahbcfg != -1) {
295 ahbcfg &= GAHBCFG_CTRL_MASK;
296 ahbcfg |= hsotg->core_params->ahbcfg &
297 ~GAHBCFG_CTRL_MASK;
298 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700299 break;
300
301 case GHWCFG2_SLAVE_ONLY_ARCH:
302 default:
303 dev_dbg(hsotg->dev, "Slave Only Mode\n");
304 break;
305 }
306
307 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
308 hsotg->core_params->dma_enable,
309 hsotg->core_params->dma_desc_enable);
310
311 if (hsotg->core_params->dma_enable > 0) {
312 if (hsotg->core_params->dma_desc_enable > 0)
313 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
314 else
315 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
316 } else {
317 dev_dbg(hsotg->dev, "Using Slave mode\n");
318 hsotg->core_params->dma_desc_enable = 0;
319 }
320
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700321 if (hsotg->core_params->dma_enable > 0)
322 ahbcfg |= GAHBCFG_DMA_EN;
323
324 writel(ahbcfg, hsotg->regs + GAHBCFG);
325
326 return 0;
327}
328
329static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
330{
331 u32 usbcfg;
332
333 usbcfg = readl(hsotg->regs + GUSBCFG);
334 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
335
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200336 switch ((hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
337 GHWCFG2_OP_MODE_SHIFT) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700338 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
339 if (hsotg->core_params->otg_cap ==
340 DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
341 usbcfg |= GUSBCFG_HNPCAP;
342 if (hsotg->core_params->otg_cap !=
343 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
344 usbcfg |= GUSBCFG_SRPCAP;
345 break;
346
347 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
348 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
349 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
350 if (hsotg->core_params->otg_cap !=
351 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
352 usbcfg |= GUSBCFG_SRPCAP;
353 break;
354
355 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
356 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
357 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
358 default:
359 break;
360 }
361
362 writel(usbcfg, hsotg->regs + GUSBCFG);
363}
364
365/**
366 * dwc2_core_init() - Initializes the DWC_otg controller registers and
367 * prepares the core for device mode or host mode operation
368 *
369 * @hsotg: Programming view of the DWC_otg controller
370 * @select_phy: If true then also set the Phy type
Matthijs Kooijman6706c722013-04-11 17:52:41 +0200371 * @irq: If >= 0, the irq to register
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700372 */
Matthijs Kooijman6706c722013-04-11 17:52:41 +0200373int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700374{
375 u32 usbcfg, otgctl;
376 int retval;
377
378 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
379
380 usbcfg = readl(hsotg->regs + GUSBCFG);
381
382 /* Set ULPI External VBUS bit if needed */
383 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
384 if (hsotg->core_params->phy_ulpi_ext_vbus ==
385 DWC2_PHY_ULPI_EXTERNAL_VBUS)
386 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
387
388 /* Set external TS Dline pulsing bit if needed */
389 usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
390 if (hsotg->core_params->ts_dline > 0)
391 usbcfg |= GUSBCFG_TERMSELDLPULSE;
392
393 writel(usbcfg, hsotg->regs + GUSBCFG);
394
395 /* Reset the Controller */
396 dwc2_core_reset(hsotg);
397
398 dev_dbg(hsotg->dev, "num_dev_perio_in_ep=%d\n",
399 hsotg->hwcfg4 >> GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT &
400 GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK >>
401 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT);
402
403 hsotg->total_fifo_size = hsotg->hwcfg3 >> GHWCFG3_DFIFO_DEPTH_SHIFT &
404 GHWCFG3_DFIFO_DEPTH_MASK >> GHWCFG3_DFIFO_DEPTH_SHIFT;
405 hsotg->rx_fifo_size = readl(hsotg->regs + GRXFSIZ);
406 hsotg->nperio_tx_fifo_size =
407 readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
408
409 dev_dbg(hsotg->dev, "Total FIFO SZ=%d\n", hsotg->total_fifo_size);
410 dev_dbg(hsotg->dev, "RxFIFO SZ=%d\n", hsotg->rx_fifo_size);
411 dev_dbg(hsotg->dev, "NP TxFIFO SZ=%d\n", hsotg->nperio_tx_fifo_size);
412
413 /*
414 * This needs to happen in FS mode before any other programming occurs
415 */
416 dwc2_phy_init(hsotg, select_phy);
417
418 /* Program the GAHBCFG Register */
419 retval = dwc2_gahbcfg_init(hsotg);
420 if (retval)
421 return retval;
422
423 /* Program the GUSBCFG register */
424 dwc2_gusbcfg_init(hsotg);
425
426 /* Program the GOTGCTL register */
427 otgctl = readl(hsotg->regs + GOTGCTL);
428 otgctl &= ~GOTGCTL_OTGVER;
429 if (hsotg->core_params->otg_ver > 0)
430 otgctl |= GOTGCTL_OTGVER;
431 writel(otgctl, hsotg->regs + GOTGCTL);
432 dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
433
434 /* Clear the SRP success bit for FS-I2c */
435 hsotg->srp_success = 0;
436
Matthijs Kooijman6706c722013-04-11 17:52:41 +0200437 if (irq >= 0) {
438 dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
439 irq);
440 retval = devm_request_irq(hsotg->dev, irq,
441 dwc2_handle_common_intr, IRQF_SHARED,
442 dev_name(hsotg->dev), hsotg);
443 if (retval)
444 return retval;
445 }
446
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700447 /* Enable common interrupts */
448 dwc2_enable_common_interrupts(hsotg);
449
450 /*
451 * Do device or host intialization based on mode during PCD and
452 * HCD initialization
453 */
454 if (dwc2_is_host_mode(hsotg)) {
455 dev_dbg(hsotg->dev, "Host Mode\n");
456 hsotg->op_state = OTG_STATE_A_HOST;
457 } else {
458 dev_dbg(hsotg->dev, "Device Mode\n");
459 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
460 }
461
462 return 0;
463}
464
465/**
466 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
467 *
468 * @hsotg: Programming view of DWC_otg controller
469 */
470void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
471{
472 u32 intmsk;
473
474 dev_dbg(hsotg->dev, "%s()\n", __func__);
475
476 /* Disable all interrupts */
477 writel(0, hsotg->regs + GINTMSK);
478 writel(0, hsotg->regs + HAINTMSK);
479
480 /* Clear any pending interrupts */
481 writel(0xffffffff, hsotg->regs + GINTSTS);
482
483 /* Enable the common interrupts */
484 dwc2_enable_common_interrupts(hsotg);
485
486 /* Enable host mode interrupts without disturbing common interrupts */
487 intmsk = readl(hsotg->regs + GINTMSK);
488 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
489 writel(intmsk, hsotg->regs + GINTMSK);
490}
491
492/**
493 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
494 *
495 * @hsotg: Programming view of DWC_otg controller
496 */
497void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
498{
499 u32 intmsk = readl(hsotg->regs + GINTMSK);
500
501 /* Disable host mode interrupts without disturbing common interrupts */
502 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
503 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
504 writel(intmsk, hsotg->regs + GINTMSK);
505}
506
/*
 * Lay out the Rx, non-periodic Tx and periodic Tx FIFOs for host mode
 * when dynamic FIFO sizing is enabled. The FIFOs are packed back to back:
 * Rx at 0, NP Tx after Rx, P Tx after NP Tx.
 */
static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	u32 rxfsiz, nptxfsiz, ptxfsiz, hptxfsiz, dfifocfg;

	/* Nothing to do if the hardware FIFO layout is fixed */
	if (!params->enable_dynamic_fifo)
		return;

	dev_dbg(hsotg->dev, "Total FIFO Size=%d\n", hsotg->total_fifo_size);
	dev_dbg(hsotg->dev, "Rx FIFO Size=%d\n", params->host_rx_fifo_size);
	dev_dbg(hsotg->dev, "NP Tx FIFO Size=%d\n",
		params->host_nperio_tx_fifo_size);
	dev_dbg(hsotg->dev, "P Tx FIFO Size=%d\n",
		params->host_perio_tx_fifo_size);

	/* Rx FIFO */
	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n",
		readl(hsotg->regs + GRXFSIZ));
	writel(params->host_rx_fifo_size, hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));

	/* Non-periodic Tx FIFO: depth + start address (right after Rx) */
	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
		readl(hsotg->regs + GNPTXFSIZ));
	nptxfsiz = params->host_nperio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	nptxfsiz |= params->host_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
		readl(hsotg->regs + GNPTXFSIZ));

	/* Periodic Tx FIFO: depth + start address (after Rx and NP Tx) */
	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
		readl(hsotg->regs + HPTXFSIZ));
	ptxfsiz = params->host_perio_tx_fifo_size <<
		  FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	ptxfsiz |= (params->host_rx_fifo_size +
		    params->host_nperio_tx_fifo_size) <<
		   FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	writel(ptxfsiz, hsotg->regs + HPTXFSIZ);
	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
		readl(hsotg->regs + HPTXFSIZ));

	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
	    hsotg->snpsid <= DWC2_CORE_REV_2_94a) {
		/*
		 * Global DFIFOCFG calculation for Host mode -
		 * include RxFIFO, NPTXFIFO and HPTXFIFO
		 */
		dfifocfg = readl(hsotg->regs + GDFIFOCFG);
		rxfsiz = readl(hsotg->regs + GRXFSIZ) & 0x0000ffff;
		nptxfsiz = readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
		hptxfsiz = readl(hsotg->regs + HPTXFSIZ) >> 16 & 0xffff;
		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
		dfifocfg |= (rxfsiz + nptxfsiz + hptxfsiz) <<
			    GDFIFOCFG_EPINFOBASE_SHIFT &
			    GDFIFOCFG_EPINFOBASE_MASK;
		writel(dfifocfg, hsotg->regs + GDFIFOCFG);
	}
}
568
569/**
570 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
571 * Host mode
572 *
573 * @hsotg: Programming view of DWC_otg controller
574 *
575 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
576 * request queues. Host channels are reset to ensure that they are ready for
577 * performing transfers.
578 */
579void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
580{
581 u32 hcfg, hfir, otgctl;
582
583 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
584
585 /* Restart the Phy Clock */
586 writel(0, hsotg->regs + PCGCTL);
587
588 /* Initialize Host Configuration Register */
589 dwc2_init_fs_ls_pclk_sel(hsotg);
590 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
591 hcfg = readl(hsotg->regs + HCFG);
592 hcfg |= HCFG_FSLSSUPP;
593 writel(hcfg, hsotg->regs + HCFG);
594 }
595
596 /*
597 * This bit allows dynamic reloading of the HFIR register during
598 * runtime. This bit needs to be programmed during inital configuration
599 * and its value must not be changed during runtime.
600 */
601 if (hsotg->core_params->reload_ctl > 0) {
602 hfir = readl(hsotg->regs + HFIR);
603 hfir |= HFIR_RLDCTRL;
604 writel(hfir, hsotg->regs + HFIR);
605 }
606
607 if (hsotg->core_params->dma_desc_enable > 0) {
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200608 u32 op_mode = (hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
609 GHWCFG2_OP_MODE_SHIFT;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700610
611 if (hsotg->snpsid < DWC2_CORE_REV_2_90a ||
612 !(hsotg->hwcfg4 & GHWCFG4_DESC_DMA) ||
613 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
614 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
615 op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
616 dev_err(hsotg->dev,
617 "Hardware does not support descriptor DMA mode -\n");
618 dev_err(hsotg->dev,
619 "falling back to buffer DMA mode.\n");
620 hsotg->core_params->dma_desc_enable = 0;
621 } else {
622 hcfg = readl(hsotg->regs + HCFG);
623 hcfg |= HCFG_DESCDMA;
624 writel(hcfg, hsotg->regs + HCFG);
625 }
626 }
627
628 /* Configure data FIFO sizes */
629 dwc2_config_fifos(hsotg);
630
631 /* TODO - check this */
632 /* Clear Host Set HNP Enable in the OTG Control Register */
633 otgctl = readl(hsotg->regs + GOTGCTL);
634 otgctl &= ~GOTGCTL_HSTSETHNPEN;
635 writel(otgctl, hsotg->regs + GOTGCTL);
636
637 /* Make sure the FIFOs are flushed */
638 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
639 dwc2_flush_rx_fifo(hsotg);
640
641 /* Clear Host Set HNP Enable in the OTG Control Register */
642 otgctl = readl(hsotg->regs + GOTGCTL);
643 otgctl &= ~GOTGCTL_HSTSETHNPEN;
644 writel(otgctl, hsotg->regs + GOTGCTL);
645
646 if (hsotg->core_params->dma_desc_enable <= 0) {
647 int num_channels, i;
648 u32 hcchar;
649
650 /* Flush out any leftover queued requests */
651 num_channels = hsotg->core_params->host_channels;
652 for (i = 0; i < num_channels; i++) {
653 hcchar = readl(hsotg->regs + HCCHAR(i));
654 hcchar &= ~HCCHAR_CHENA;
655 hcchar |= HCCHAR_CHDIS;
656 hcchar &= ~HCCHAR_EPDIR;
657 writel(hcchar, hsotg->regs + HCCHAR(i));
658 }
659
660 /* Halt all channels to put them into a known state */
661 for (i = 0; i < num_channels; i++) {
662 int count = 0;
663
664 hcchar = readl(hsotg->regs + HCCHAR(i));
665 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
666 hcchar &= ~HCCHAR_EPDIR;
667 writel(hcchar, hsotg->regs + HCCHAR(i));
668 dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
669 __func__, i);
670 do {
671 hcchar = readl(hsotg->regs + HCCHAR(i));
672 if (++count > 1000) {
673 dev_err(hsotg->dev,
674 "Unable to clear enable on channel %d\n",
675 i);
676 break;
677 }
678 udelay(1);
679 } while (hcchar & HCCHAR_CHENA);
680 }
681 }
682
683 /* Turn on the vbus power */
684 dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
685 if (hsotg->op_state == OTG_STATE_A_HOST) {
686 u32 hprt0 = dwc2_read_hprt0(hsotg);
687
688 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
689 !!(hprt0 & HPRT0_PWR));
690 if (!(hprt0 & HPRT0_PWR)) {
691 hprt0 |= HPRT0_PWR;
692 writel(hprt0, hsotg->regs + HPRT0);
693 }
694 }
695
696 dwc2_enable_host_interrupts(hsotg);
697}
698
699static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
700 struct dwc2_host_chan *chan)
701{
702 u32 hcintmsk = HCINTMSK_CHHLTD;
703
704 switch (chan->ep_type) {
705 case USB_ENDPOINT_XFER_CONTROL:
706 case USB_ENDPOINT_XFER_BULK:
707 dev_vdbg(hsotg->dev, "control/bulk\n");
708 hcintmsk |= HCINTMSK_XFERCOMPL;
709 hcintmsk |= HCINTMSK_STALL;
710 hcintmsk |= HCINTMSK_XACTERR;
711 hcintmsk |= HCINTMSK_DATATGLERR;
712 if (chan->ep_is_in) {
713 hcintmsk |= HCINTMSK_BBLERR;
714 } else {
715 hcintmsk |= HCINTMSK_NAK;
716 hcintmsk |= HCINTMSK_NYET;
717 if (chan->do_ping)
718 hcintmsk |= HCINTMSK_ACK;
719 }
720
721 if (chan->do_split) {
722 hcintmsk |= HCINTMSK_NAK;
723 if (chan->complete_split)
724 hcintmsk |= HCINTMSK_NYET;
725 else
726 hcintmsk |= HCINTMSK_ACK;
727 }
728
729 if (chan->error_state)
730 hcintmsk |= HCINTMSK_ACK;
731 break;
732
733 case USB_ENDPOINT_XFER_INT:
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200734 if (dbg_perio())
735 dev_vdbg(hsotg->dev, "intr\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700736 hcintmsk |= HCINTMSK_XFERCOMPL;
737 hcintmsk |= HCINTMSK_NAK;
738 hcintmsk |= HCINTMSK_STALL;
739 hcintmsk |= HCINTMSK_XACTERR;
740 hcintmsk |= HCINTMSK_DATATGLERR;
741 hcintmsk |= HCINTMSK_FRMOVRUN;
742
743 if (chan->ep_is_in)
744 hcintmsk |= HCINTMSK_BBLERR;
745 if (chan->error_state)
746 hcintmsk |= HCINTMSK_ACK;
747 if (chan->do_split) {
748 if (chan->complete_split)
749 hcintmsk |= HCINTMSK_NYET;
750 else
751 hcintmsk |= HCINTMSK_ACK;
752 }
753 break;
754
755 case USB_ENDPOINT_XFER_ISOC:
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200756 if (dbg_perio())
757 dev_vdbg(hsotg->dev, "isoc\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700758 hcintmsk |= HCINTMSK_XFERCOMPL;
759 hcintmsk |= HCINTMSK_FRMOVRUN;
760 hcintmsk |= HCINTMSK_ACK;
761
762 if (chan->ep_is_in) {
763 hcintmsk |= HCINTMSK_XACTERR;
764 hcintmsk |= HCINTMSK_BBLERR;
765 }
766 break;
767 default:
768 dev_err(hsotg->dev, "## Unknown EP type ##\n");
769 break;
770 }
771
772 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200773 if (dbg_hc(chan))
774 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700775}
776
777static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
778 struct dwc2_host_chan *chan)
779{
780 u32 hcintmsk = HCINTMSK_CHHLTD;
781
782 /*
783 * For Descriptor DMA mode core halts the channel on AHB error.
784 * Interrupt is not required.
785 */
786 if (hsotg->core_params->dma_desc_enable <= 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200787 if (dbg_hc(chan))
788 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700789 hcintmsk |= HCINTMSK_AHBERR;
790 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200791 if (dbg_hc(chan))
792 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700793 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
794 hcintmsk |= HCINTMSK_XFERCOMPL;
795 }
796
797 if (chan->error_state && !chan->do_split &&
798 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200799 if (dbg_hc(chan))
800 dev_vdbg(hsotg->dev, "setting ACK\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700801 hcintmsk |= HCINTMSK_ACK;
802 if (chan->ep_is_in) {
803 hcintmsk |= HCINTMSK_DATATGLERR;
804 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
805 hcintmsk |= HCINTMSK_NAK;
806 }
807 }
808
809 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200810 if (dbg_hc(chan))
811 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700812}
813
814static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
815 struct dwc2_host_chan *chan)
816{
817 u32 intmsk;
818
819 if (hsotg->core_params->dma_enable > 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200820 if (dbg_hc(chan))
821 dev_vdbg(hsotg->dev, "DMA enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700822 dwc2_hc_enable_dma_ints(hsotg, chan);
823 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200824 if (dbg_hc(chan))
825 dev_vdbg(hsotg->dev, "DMA disabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700826 dwc2_hc_enable_slave_ints(hsotg, chan);
827 }
828
829 /* Enable the top level host channel interrupt */
830 intmsk = readl(hsotg->regs + HAINTMSK);
831 intmsk |= 1 << chan->hc_num;
832 writel(intmsk, hsotg->regs + HAINTMSK);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200833 if (dbg_hc(chan))
834 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700835
836 /* Make sure host channel interrupts are enabled */
837 intmsk = readl(hsotg->regs + GINTMSK);
838 intmsk |= GINTSTS_HCHINT;
839 writel(intmsk, hsotg->regs + GINTMSK);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200840 if (dbg_hc(chan))
841 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700842}
843
844/**
845 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
846 * a specific endpoint
847 *
848 * @hsotg: Programming view of DWC_otg controller
849 * @chan: Information needed to initialize the host channel
850 *
851 * The HCCHARn register is set up with the characteristics specified in chan.
852 * Host channel interrupts that may need to be serviced while this transfer is
853 * in progress are enabled.
854 */
855void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
856{
857 u8 hc_num = chan->hc_num;
858 u32 hcintmsk;
859 u32 hcchar;
860 u32 hcsplt = 0;
861
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200862 if (dbg_hc(chan))
863 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700864
865 /* Clear old interrupt conditions for this host channel */
866 hcintmsk = 0xffffffff;
867 hcintmsk &= ~HCINTMSK_RESERVED14_31;
868 writel(hcintmsk, hsotg->regs + HCINT(hc_num));
869
870 /* Enable channel interrupts required for this transfer */
871 dwc2_hc_enable_ints(hsotg, chan);
872
873 /*
874 * Program the HCCHARn register with the endpoint characteristics for
875 * the current transfer
876 */
877 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
878 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
879 if (chan->ep_is_in)
880 hcchar |= HCCHAR_EPDIR;
881 if (chan->speed == USB_SPEED_LOW)
882 hcchar |= HCCHAR_LSPDDEV;
883 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
884 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
885 writel(hcchar, hsotg->regs + HCCHAR(hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200886 if (dbg_hc(chan)) {
887 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
888 hc_num, hcchar);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700889
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200890 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, hc_num);
891 dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
892 hcchar >> HCCHAR_DEVADDR_SHIFT &
893 HCCHAR_DEVADDR_MASK >> HCCHAR_DEVADDR_SHIFT);
894 dev_vdbg(hsotg->dev, " Ep Num: %d\n",
895 hcchar >> HCCHAR_EPNUM_SHIFT &
896 HCCHAR_EPNUM_MASK >> HCCHAR_EPNUM_SHIFT);
897 dev_vdbg(hsotg->dev, " Is In: %d\n",
898 !!(hcchar & HCCHAR_EPDIR));
899 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
900 !!(hcchar & HCCHAR_LSPDDEV));
901 dev_vdbg(hsotg->dev, " Ep Type: %d\n",
902 hcchar >> HCCHAR_EPTYPE_SHIFT &
903 HCCHAR_EPTYPE_MASK >> HCCHAR_EPTYPE_SHIFT);
904 dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
905 hcchar >> HCCHAR_MPS_SHIFT &
906 HCCHAR_MPS_MASK >> HCCHAR_MPS_SHIFT);
907 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
908 hcchar >> HCCHAR_MULTICNT_SHIFT &
909 HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
910 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700911
912 /* Program the HCSPLT register for SPLITs */
913 if (chan->do_split) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200914 if (dbg_hc(chan))
915 dev_vdbg(hsotg->dev,
916 "Programming HC %d with split --> %s\n",
917 hc_num,
918 chan->complete_split ? "CSPLIT" : "SSPLIT");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700919 if (chan->complete_split)
920 hcsplt |= HCSPLT_COMPSPLT;
921 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
922 HCSPLT_XACTPOS_MASK;
923 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
924 HCSPLT_HUBADDR_MASK;
925 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
926 HCSPLT_PRTADDR_MASK;
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200927 if (dbg_hc(chan)) {
928 dev_vdbg(hsotg->dev, " comp split %d\n",
929 chan->complete_split);
930 dev_vdbg(hsotg->dev, " xact pos %d\n",
931 chan->xact_pos);
932 dev_vdbg(hsotg->dev, " hub addr %d\n",
933 chan->hub_addr);
934 dev_vdbg(hsotg->dev, " hub port %d\n",
935 chan->hub_port);
936 dev_vdbg(hsotg->dev, " is_in %d\n",
937 chan->ep_is_in);
938 dev_vdbg(hsotg->dev, " Max Pkt %d\n",
939 hcchar >> HCCHAR_MPS_SHIFT &
940 HCCHAR_MPS_MASK >> HCCHAR_MPS_SHIFT);
941 dev_vdbg(hsotg->dev, " xferlen %d\n",
942 chan->xfer_len);
943 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700944 }
945
946 writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
947}
948
949/**
950 * dwc2_hc_halt() - Attempts to halt a host channel
951 *
952 * @hsotg: Controller register interface
953 * @chan: Host channel to halt
954 * @halt_status: Reason for halting the channel
955 *
956 * This function should only be called in Slave mode or to abort a transfer in
957 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
958 * controller halts the channel when the transfer is complete or a condition
959 * occurs that requires application intervention.
960 *
961 * In slave mode, checks for a free request queue entry, then sets the Channel
962 * Enable and Channel Disable bits of the Host Channel Characteristics
 * register of the specified channel to initiate the halt. If there is no free
964 * request queue entry, sets only the Channel Disable bit of the HCCHARn
965 * register to flush requests for this channel. In the latter case, sets a
966 * flag to indicate that the host channel needs to be halted when a request
967 * queue slot is open.
968 *
969 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
970 * HCCHARn register. The controller ensures there is space in the request
971 * queue before submitting the halt request.
972 *
973 * Some time may elapse before the core flushes any posted requests for this
974 * host channel and halts. The Channel Halted interrupt handler completes the
975 * deactivation of the host channel.
976 */
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		  enum dwc2_halt_status halt_status)
{
	u32 nptxsts, hptxsts, hcchar;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);
	/* Callers must supply a real reason for the halt */
	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);

	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    halt_status == DWC2_HC_XFER_AHB_ERR) {
		/*
		 * Disable all channel interrupts except Ch Halted. The QTD
		 * and QH state associated with this transfer has been cleared
		 * (in the case of URB_DEQUEUE), so the channel needs to be
		 * shut down carefully to prevent crashes.
		 */
		u32 hcintmsk = HCINTMSK_CHHLTD;

		dev_vdbg(hsotg->dev, "dequeue/error\n");
		writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));

		/*
		 * Make sure no other interrupts besides halt are currently
		 * pending. Handling another interrupt could cause a crash due
		 * to the QTD and QH state.
		 */
		writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));

		/*
		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
		 * even if the channel was already halted for some other
		 * reason
		 */
		chan->halt_status = halt_status;

		hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
		if (!(hcchar & HCCHAR_CHENA)) {
			/*
			 * The channel is either already halted or it hasn't
			 * started yet. In DMA mode, the transfer may halt if
			 * it finishes normally or a condition occurs that
			 * requires driver intervention. Don't want to halt
			 * the channel again. In either Slave or DMA mode,
			 * it's possible that the transfer has been assigned
			 * to a channel, but not started yet when an URB is
			 * dequeued. Don't want to halt a channel that hasn't
			 * started yet.
			 */
			return;
		}
	}
	if (chan->halt_pending) {
		/*
		 * A halt has already been issued for this channel. This might
		 * happen when a transfer is aborted by a higher level in
		 * the stack.
		 */
		dev_vdbg(hsotg->dev,
			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
			 __func__, chan->hc_num);
		return;
	}

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));

	/* No need to set the bit in DDMA for disabling the channel */
	/* TODO check it everywhere channel is disabled */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcchar |= HCCHAR_CHENA;
	} else {
		if (dbg_hc(chan))
			dev_dbg(hsotg->dev, "desc DMA enabled\n");
	}
	/* Setting Channel Disable requests the halt from the core */
	hcchar |= HCCHAR_CHDIS;

	if (hsotg->core_params->dma_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA not enabled\n");
		hcchar |= HCCHAR_CHENA;

		/* Check for space in the request queue to issue the halt */
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			nptxsts = readl(hsotg->regs + GNPTXSTS);
			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
				/* Queue full: disable only, halt on queue */
				dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		} else {
			if (dbg_perio())
				dev_vdbg(hsotg->dev, "isoc/intr\n");
			hptxsts = readl(hsotg->regs + HPTXSTS);
			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
			    hsotg->queuing_high_bandwidth) {
				if (dbg_perio())
					dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		}
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
	}

	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	chan->halt_status = halt_status;

	if (hcchar & HCCHAR_CHENA) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel enabled\n");
		/* Halt was issued; Channel Halted IRQ completes deactivation */
		chan->halt_pending = 1;
		chan->halt_on_queue = 0;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel disabled\n");
		/* No request queue slot was free; halt when one opens */
		chan->halt_on_queue = 1;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
			 hcchar);
		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
			 chan->halt_pending);
		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
			 chan->halt_on_queue);
		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
			 chan->halt_status);
	}
}
1113
1114/**
1115 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1116 *
1117 * @hsotg: Programming view of DWC_otg controller
1118 * @chan: Identifies the host channel to clean up
1119 *
1120 * This function is normally called after a transfer is done and the host
1121 * channel is being released
1122 */
1123void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1124{
1125 u32 hcintmsk;
1126
1127 chan->xfer_started = 0;
1128
1129 /*
1130 * Clear channel interrupt enables and any unhandled channel interrupt
1131 * conditions
1132 */
1133 writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1134 hcintmsk = 0xffffffff;
1135 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1136 writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1137}
1138
1139/**
1140 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1141 * which frame a periodic transfer should occur
1142 *
1143 * @hsotg: Programming view of DWC_otg controller
1144 * @chan: Identifies the host channel to set up and its properties
1145 * @hcchar: Current value of the HCCHAR register for the specified host channel
1146 *
1147 * This function has no effect on non-periodic transfers
1148 */
1149static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1150 struct dwc2_host_chan *chan, u32 *hcchar)
1151{
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001152 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1153 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001154 /* 1 if _next_ frame is odd, 0 if it's even */
Paul Zimmerman81a58952013-06-24 11:34:23 -07001155 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001156 *hcchar |= HCCHAR_ODDFRM;
1157 }
1158}
1159
1160static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1161{
1162 /* Set up the initial PID for the transfer */
1163 if (chan->speed == USB_SPEED_HIGH) {
1164 if (chan->ep_is_in) {
1165 if (chan->multi_count == 1)
1166 chan->data_pid_start = DWC2_HC_PID_DATA0;
1167 else if (chan->multi_count == 2)
1168 chan->data_pid_start = DWC2_HC_PID_DATA1;
1169 else
1170 chan->data_pid_start = DWC2_HC_PID_DATA2;
1171 } else {
1172 if (chan->multi_count == 1)
1173 chan->data_pid_start = DWC2_HC_PID_DATA0;
1174 else
1175 chan->data_pid_start = DWC2_HC_PID_MDATA;
1176 }
1177 } else {
1178 chan->data_pid_start = DWC2_HC_PID_DATA0;
1179 }
1180}
1181
1182/**
1183 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1184 * the Host Channel
1185 *
1186 * @hsotg: Programming view of DWC_otg controller
1187 * @chan: Information needed to initialize the host channel
1188 *
1189 * This function should only be called in Slave mode. For a channel associated
1190 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1191 * associated with a periodic EP, the periodic Tx FIFO is written.
1192 *
1193 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1194 * the number of bytes written to the Tx FIFO.
1195 */
1196static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1197 struct dwc2_host_chan *chan)
1198{
1199 u32 i;
1200 u32 remaining_count;
1201 u32 byte_count;
1202 u32 dword_count;
1203 u32 __iomem *data_fifo;
1204 u32 *data_buf = (u32 *)chan->xfer_buf;
1205
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001206 if (dbg_hc(chan))
1207 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001208
1209 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1210
1211 remaining_count = chan->xfer_len - chan->xfer_count;
1212 if (remaining_count > chan->max_packet)
1213 byte_count = chan->max_packet;
1214 else
1215 byte_count = remaining_count;
1216
1217 dword_count = (byte_count + 3) / 4;
1218
1219 if (((unsigned long)data_buf & 0x3) == 0) {
1220 /* xfer_buf is DWORD aligned */
1221 for (i = 0; i < dword_count; i++, data_buf++)
1222 writel(*data_buf, data_fifo);
1223 } else {
1224 /* xfer_buf is not DWORD aligned */
1225 for (i = 0; i < dword_count; i++, data_buf++) {
1226 u32 data = data_buf[0] | data_buf[1] << 8 |
1227 data_buf[2] << 16 | data_buf[3] << 24;
1228 writel(data, data_fifo);
1229 }
1230 }
1231
1232 chan->xfer_count += byte_count;
1233 chan->xfer_buf += byte_count;
1234}
1235
1236/**
1237 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1238 * channel and starts the transfer
1239 *
1240 * @hsotg: Programming view of DWC_otg controller
1241 * @chan: Information needed to initialize the host channel. The xfer_len value
1242 * may be reduced to accommodate the max widths of the XferSize and
1243 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1244 * changed to reflect the final xfer_len value.
1245 *
1246 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1247 * the caller must ensure that there is sufficient space in the request queue
1248 * and Tx Data FIFO.
1249 *
1250 * For an OUT transfer in Slave mode, it loads a data packet into the
1251 * appropriate FIFO. If necessary, additional data packets are loaded in the
1252 * Host ISR.
1253 *
1254 * For an IN transfer in Slave mode, a data packet is requested. The data
1255 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1256 * additional data packets are requested in the Host ISR.
1257 *
1258 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1259 * register along with a packet count of 1 and the channel is enabled. This
1260 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1261 * simply set to 0 since no data transfer occurs in this case.
1262 *
1263 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1264 * all the information required to perform the subsequent data transfer. In
1265 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1266 * controller performs the entire PING protocol, then starts the data
1267 * transfer.
1268 */
void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
			    struct dwc2_host_chan *chan)
{
	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
	u32 hcchar;
	u32 hctsiz = 0;
	u16 num_packets;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (chan->do_ping) {
		if (hsotg->core_params->dma_enable <= 0) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, no DMA\n");
			/* Slave mode: issue the PING directly and return */
			dwc2_hc_do_ping(hsotg, chan);
			chan->xfer_started = 1;
			return;
		} else {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, DMA\n");
			/* DMA mode: core runs PING then the data phase */
			hctsiz |= TSIZ_DOPNG;
		}
	}

	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "split\n");
		/* A split transaction queues exactly one packet */
		num_packets = 1;

		if (chan->complete_split && !chan->ep_is_in)
			/*
			 * For CSPLIT OUT Transfer, set the size to 0 so the
			 * core doesn't expect any data written to the FIFO
			 */
			chan->xfer_len = 0;
		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
			chan->xfer_len = chan->max_packet;
		else if (!chan->ep_is_in && chan->xfer_len > 188)
			/*
			 * 188 bytes: largest start-split payload per
			 * microframe (USB 2.0 split scheduling)
			 */
			chan->xfer_len = 188;

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "no split\n");
		/*
		 * Ensure that the transfer length and packet count will fit
		 * in the widths allocated for them in the HCTSIZn register
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * Make sure the transfer size is no larger than one
			 * (micro)frame's worth of data. (A check was done
			 * when the periodic transfer was accepted to ensure
			 * that a (micro)frame's worth of data can be
			 * programmed into a channel.)
			 */
			u32 max_periodic_len =
				chan->multi_count * chan->max_packet;

			if (chan->xfer_len > max_periodic_len)
				chan->xfer_len = max_periodic_len;
		} else if (chan->xfer_len > max_hc_xfer_size) {
			/*
			 * Make sure that xfer_len is a multiple of max packet
			 * size
			 */
			chan->xfer_len =
				max_hc_xfer_size - chan->max_packet + 1;
		}

		if (chan->xfer_len > 0) {
			/* Round up to whole packets, capped by hardware */
			num_packets = (chan->xfer_len + chan->max_packet - 1) /
					chan->max_packet;
			if (num_packets > max_hc_pkt_count) {
				num_packets = max_hc_pkt_count;
				chan->xfer_len = num_packets * chan->max_packet;
			}
		} else {
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;
		}

		if (chan->ep_is_in)
			/*
			 * Always program an integral # of max packets for IN
			 * transfers
			 */
			chan->xfer_len = num_packets * chan->max_packet;

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Make sure that the multi_count field matches the
			 * actual transfer length
			 */
			chan->multi_count = num_packets;

		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			dwc2_set_pid_isoc(chan);

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;
	}

	chan->start_pkt_count = num_packets;
	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;
	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
			 hctsiz, chan->hc_num);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
			 hctsiz >> TSIZ_XFERSIZE_SHIFT &
			 TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT);
		dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
			 hctsiz >> TSIZ_PKTCNT_SHIFT &
			 TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT);
		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
			 hctsiz >> TSIZ_SC_MC_PID_SHIFT &
			 TSIZ_SC_MC_PID_MASK >> TSIZ_SC_MC_PID_SHIFT);
	}

	if (hsotg->core_params->dma_enable > 0) {
		dma_addr_t dma_addr;

		if (chan->align_buf) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "align_buf\n");
			/* Presumably a DMA-safe bounce buffer — see callers */
			dma_addr = chan->align_buf;
		} else {
			dma_addr = chan->xfer_dma;
		}
		writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
				 (unsigned long)dma_addr, chan->hc_num);
	}

	/* Start the split */
	if (chan->do_split) {
		u32 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));

		hcsplt |= HCSPLT_SPLTENA;
		writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
	}

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;
	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);

	/* CHDIS is not expected to be set at this point; warn if it is */
	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
			 hcchar >> HCCHAR_MULTICNT_SHIFT &
			 HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);

	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;

	if (hsotg->core_params->dma_enable <= 0 &&
	    !chan->ep_is_in && chan->xfer_len > 0)
		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
}
1456
1457/**
1458 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1459 * host channel and starts the transfer in Descriptor DMA mode
1460 *
1461 * @hsotg: Programming view of DWC_otg controller
1462 * @chan: Information needed to initialize the host channel
1463 *
1464 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1465 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1466 * with micro-frame bitmap.
1467 *
1468 * Initializes HCDMA register with descriptor list address and CTD value then
1469 * starts the transfer via enabling the channel.
1470 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hc_dma;
	u32 hctsiz = 0;

	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
	}

	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));

	/* Program the descriptor list base address */
	hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;

	/* Always start from first descriptor */
	hc_dma &= ~HCDMA_CTD_MASK;
	writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
			 hc_dma, chan->hc_num);

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	/* CHDIS is not expected to be set at this point; warn if it is */
	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
			 hcchar >> HCCHAR_MULTICNT_SHIFT &
			 HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);

	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}
1540
1541/**
1542 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1543 * a previous call to dwc2_hc_start_transfer()
1544 *
1545 * @hsotg: Programming view of DWC_otg controller
1546 * @chan: Information needed to initialize the host channel
1547 *
1548 * The caller must ensure there is sufficient space in the request queue and Tx
1549 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1550 * the controller acts autonomously to complete transfers programmed to a host
1551 * channel.
1552 *
1553 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1554 * if there is any data remaining to be queued. For an IN transfer, another
1555 * data packet is always requested. For the SETUP phase of a control transfer,
1556 * this function does nothing.
1557 *
1558 * Return: 1 if a new request is queued, 0 if no more requests are required
1559 * for this transfer
1560 */
int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen. So we can't depend on the NAK interrupt
		 * handler to requeue a NAK'd request. Instead, IN requests
		 * are issued each time this function is called. When the
		 * transfer completes, the extra requests for the channel will
		 * be flushed.
		 */
		u32 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));

		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
		/* Re-enable the channel to request another IN packet */
		hcchar |= HCCHAR_CHENA;
		hcchar &= ~HCCHAR_CHDIS;
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "	 IN xfer: hcchar = 0x%08x\n",
				 hcchar);
		writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
		chan->requests++;
		return 1;
	}

	/* OUT transfers */

	if (chan->xfer_count < chan->xfer_len) {
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/* Periodic OUT: update the even/odd frame bit */
			u32 hcchar = readl(hsotg->regs +
					   HCCHAR(chan->hc_num));

			dwc2_hc_set_even_odd_frame(hsotg, chan,
						   &hcchar);
		}

		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
		chan->requests++;
		return 1;
	}

	/* All OUT data already queued; no further requests needed */
	return 0;
}
1622
1623/**
1624 * dwc2_hc_do_ping() - Starts a PING transfer
1625 *
1626 * @hsotg: Programming view of DWC_otg controller
1627 * @chan: Information needed to initialize the host channel
1628 *
1629 * This function should only be called in Slave mode. The Do Ping bit is set in
1630 * the HCTSIZ register, then the channel is enabled.
1631 */
1632void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1633{
1634 u32 hcchar;
1635 u32 hctsiz;
1636
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001637 if (dbg_hc(chan))
1638 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1639 chan->hc_num);
1640
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001641
1642 hctsiz = TSIZ_DOPNG;
1643 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1644 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1645
1646 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1647 hcchar |= HCCHAR_CHENA;
1648 hcchar &= ~HCCHAR_CHDIS;
1649 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1650}
1651
1652/**
1653 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
1654 * the HFIR register according to PHY type and speed
1655 *
1656 * @hsotg: Programming view of DWC_otg controller
1657 *
1658 * NOTE: The caller can modify the value of the HFIR register only after the
1659 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
1660 * has been set
1661 */
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;
	u32 hwcfg2;
	u32 hprt0;
	int clock = 60;	/* default value */

	usbcfg = readl(hsotg->regs + GUSBCFG);
	hwcfg2 = readl(hsotg->regs + GHWCFG2);
	hprt0 = readl(hsotg->regs + HPRT0);

	/*
	 * Derive the PHY clock rate from the PHY selection bits in GUSBCFG
	 * and the FS PHY type in GHWCFG2. The conditions are evaluated in
	 * order; a later match overrides an earlier one. The value is in
	 * MHz (125 * clock = PHY clocks per 125 us microframe below).
	 */
	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
	    !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	if ((usbcfg & GUSBCFG_PHYSEL) &&
	    (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >> GHWCFG2_FS_PHY_TYPE_SHIFT ==
	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
		clock = 48;
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 30;
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 48;
	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
	    (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >> GHWCFG2_FS_PHY_TYPE_SHIFT ==
	    GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
		clock = 48;
	if ((usbcfg & GUSBCFG_PHYSEL) &&
	    (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >> GHWCFG2_FS_PHY_TYPE_SHIFT ==
	    GHWCFG2_FS_PHY_TYPE_DEDICATED)
		clock = 48;

	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
		/* High speed case: 125 us per microframe */
		return 125 * clock;
	else
		/* FS/LS case: 1 ms (1000 us) per frame */
		return 1000 * clock;
}
1705
1706/**
1707 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
1708 * buffer
1709 *
 * @hsotg: Programming view of DWC_otg controller
1711 * @dest: Destination buffer for the packet
1712 * @bytes: Number of bytes to copy to the destination
1713 */
1714void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
1715{
1716 u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
1717 u32 *data_buf = (u32 *)dest;
1718 int word_count = (bytes + 3) / 4;
1719 int i;
1720
1721 /*
1722 * Todo: Account for the case where dest is not dword aligned. This
1723 * requires reading data from the FIFO into a u32 temp buffer, then
1724 * moving it into the data buffer.
1725 */
1726
1727 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
1728
1729 for (i = 0; i < word_count; i++, data_buf++)
1730 *data_buf = readl(fifo);
1731}
1732
1733/**
1734 * dwc2_dump_host_registers() - Prints the host registers
1735 *
1736 * @hsotg: Programming view of DWC_otg controller
1737 *
1738 * NOTE: This function will be removed once the peripheral controller code
1739 * is integrated and the driver is stable
1740 */
void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	u32 __iomem *addr;
	int i;

	/* Host-mode global registers first */
	dev_dbg(hsotg->dev, "Host Global Registers\n");
	addr = hsotg->regs + HCFG;
	dev_dbg(hsotg->dev, "HCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HFIR;
	dev_dbg(hsotg->dev, "HFIR	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HFNUM;
	dev_dbg(hsotg->dev, "HFNUM	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HPTXSTS;
	dev_dbg(hsotg->dev, "HPTXSTS	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HAINT;
	dev_dbg(hsotg->dev, "HAINT	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HAINTMSK;
	dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	/* HFLBADDR only exists/matters in descriptor DMA mode */
	if (hsotg->core_params->dma_desc_enable > 0) {
		addr = hsotg->regs + HFLBADDR;
		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
	}

	addr = hsotg->regs + HPRT0;
	dev_dbg(hsotg->dev, "HPRT0	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));

	/* Then the per-channel register sets */
	for (i = 0; i < hsotg->core_params->host_channels; i++) {
		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
		addr = hsotg->regs + HCCHAR(i);
		dev_dbg(hsotg->dev, "HCCHAR	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCSPLT(i);
		dev_dbg(hsotg->dev, "HCSPLT	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCINT(i);
		dev_dbg(hsotg->dev, "HCINT	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCINTMSK(i);
		dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCTSIZ(i);
		dev_dbg(hsotg->dev, "HCTSIZ	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCDMA(i);
		dev_dbg(hsotg->dev, "HCDMA	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		if (hsotg->core_params->dma_desc_enable > 0) {
			addr = hsotg->regs + HCDMAB(i);
			dev_dbg(hsotg->dev, "HCDMAB	 @0x%08lX : 0x%08X\n",
				(unsigned long)addr, readl(addr));
		}
	}
#endif
}
1804
/**
 * dwc2_dump_global_registers() - Prints the core global registers
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * Dumps every core global register plus PCGCTL via dev_dbg. Compiled out
 * unless DEBUG is defined.
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	u32 __iomem *addr;

	dev_dbg(hsotg->dev, "Core Global Registers\n");
	addr = hsotg->regs + GOTGCTL;
	dev_dbg(hsotg->dev, "GOTGCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GOTGINT;
	dev_dbg(hsotg->dev, "GOTGINT	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GAHBCFG;
	dev_dbg(hsotg->dev, "GAHBCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GUSBCFG;
	dev_dbg(hsotg->dev, "GUSBCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GRSTCTL;
	dev_dbg(hsotg->dev, "GRSTCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GINTSTS;
	dev_dbg(hsotg->dev, "GINTSTS	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GINTMSK;
	dev_dbg(hsotg->dev, "GINTMSK	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GRXSTSR;
	dev_dbg(hsotg->dev, "GRXSTSR	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GRXFSIZ;
	dev_dbg(hsotg->dev, "GRXFSIZ	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GNPTXFSIZ;
	dev_dbg(hsotg->dev, "GNPTXFSIZ	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GNPTXSTS;
	dev_dbg(hsotg->dev, "GNPTXSTS	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GI2CCTL;
	dev_dbg(hsotg->dev, "GI2CCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GPVNDCTL;
	dev_dbg(hsotg->dev, "GPVNDCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GGPIO;
	dev_dbg(hsotg->dev, "GGPIO	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GUID;
	dev_dbg(hsotg->dev, "GUID	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GSNPSID;
	dev_dbg(hsotg->dev, "GSNPSID	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG1;
	dev_dbg(hsotg->dev, "GHWCFG1	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG2;
	dev_dbg(hsotg->dev, "GHWCFG2	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG3;
	dev_dbg(hsotg->dev, "GHWCFG3	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG4;
	dev_dbg(hsotg->dev, "GHWCFG4	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GLPMCFG;
	dev_dbg(hsotg->dev, "GLPMCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GPWRDN;
	dev_dbg(hsotg->dev, "GPWRDN	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GDFIFOCFG;
	dev_dbg(hsotg->dev, "GDFIFOCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HPTXFSIZ;
	dev_dbg(hsotg->dev, "HPTXFSIZ	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));

	/* Power and clock gating control lives outside the global block */
	addr = hsotg->regs + PCGCTL;
	dev_dbg(hsotg->dev, "PCGCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
#endif
}
1897
1898/**
1899 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
1900 *
1901 * @hsotg: Programming view of DWC_otg controller
1902 * @num: Tx FIFO to flush
1903 */
1904void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
1905{
1906 u32 greset;
1907 int count = 0;
1908
1909 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
1910
1911 greset = GRSTCTL_TXFFLSH;
1912 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
1913 writel(greset, hsotg->regs + GRSTCTL);
1914
1915 do {
1916 greset = readl(hsotg->regs + GRSTCTL);
1917 if (++count > 10000) {
1918 dev_warn(hsotg->dev,
1919 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
1920 __func__, greset,
1921 readl(hsotg->regs + GNPTXSTS));
1922 break;
1923 }
1924 udelay(1);
1925 } while (greset & GRSTCTL_TXFFLSH);
1926
1927 /* Wait for at least 3 PHY Clocks */
1928 udelay(1);
1929}
1930
1931/**
1932 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
1933 *
1934 * @hsotg: Programming view of DWC_otg controller
1935 */
1936void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
1937{
1938 u32 greset;
1939 int count = 0;
1940
1941 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1942
1943 greset = GRSTCTL_RXFFLSH;
1944 writel(greset, hsotg->regs + GRSTCTL);
1945
1946 do {
1947 greset = readl(hsotg->regs + GRSTCTL);
1948 if (++count > 10000) {
1949 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
1950 __func__, greset);
1951 break;
1952 }
1953 udelay(1);
1954 } while (greset & GRSTCTL_RXFFLSH);
1955
1956 /* Wait for at least 3 PHY Clocks */
1957 udelay(1);
1958}
1959
/* True when @a falls outside the inclusive range [@b, @c] */
#define DWC2_PARAM_TEST(a, b, c) ((a) < (b) || (a) > (c))
1961
/* Parameter access functions */

/**
 * dwc2_set_param_otg_cap() - Sets the otg_cap core parameter
 *
 * @hsotg: Programming view of DWC_otg controller
 * @val: Requested capability (DWC2_CAP_PARAM_*); a negative value means
 *       "pick a hardware-appropriate default" without logging an error
 *
 * Validates @val against the OTG operating mode field of the cached
 * GHWCFG2 value. If the request cannot be satisfied, the best capability
 * the hardware supports is stored instead and -EINVAL is returned;
 * returns 0 when @val was accepted as-is.
 */
int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;
	int retval = 0;
	u32 op_mode;

	op_mode = (hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
		  GHWCFG2_OP_MODE_SHIFT;

	switch (val) {
	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
		/* HNP+SRP needs the fully capable hardware configuration */
		if (op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
			valid = 0;
		break;
	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
		switch (op_mode) {
		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
			break;
		default:
			valid = 0;
			break;
		}
		break;
	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
		/* always valid */
		break;
	default:
		valid = 0;
		break;
	}

	if (!valid) {
		/* A negative request is a silent "use default" */
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for otg_cap parameter. Check HW configuration.\n",
				val);
		/* Fall back to the most capable mode the hardware allows */
		switch (op_mode) {
		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
			break;
		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
			break;
		default:
			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
			break;
		}
		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
		retval = -EINVAL;
	}

	hsotg->core_params->otg_cap = val;
	return retval;
}
2022
2023int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2024{
2025 int valid = 1;
2026 int retval = 0;
2027
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02002028 if (val > 0 && (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
2029 GHWCFG2_ARCHITECTURE_SHIFT == GHWCFG2_SLAVE_ONLY_ARCH)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002030 valid = 0;
2031 if (val < 0)
2032 valid = 0;
2033
2034 if (!valid) {
2035 if (val >= 0)
2036 dev_err(hsotg->dev,
2037 "%d invalid for dma_enable parameter. Check HW configuration.\n",
2038 val);
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02002039 val = (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
2040 GHWCFG2_ARCHITECTURE_SHIFT != GHWCFG2_SLAVE_ONLY_ARCH;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002041 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2042 retval = -EINVAL;
2043 }
2044
2045 hsotg->core_params->dma_enable = val;
2046 return retval;
2047}
2048
2049int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2050{
2051 int valid = 1;
2052 int retval = 0;
2053
2054 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2055 !(hsotg->hwcfg4 & GHWCFG4_DESC_DMA)))
2056 valid = 0;
2057 if (val < 0)
2058 valid = 0;
2059
2060 if (!valid) {
2061 if (val >= 0)
2062 dev_err(hsotg->dev,
2063 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2064 val);
2065 val = (hsotg->core_params->dma_enable > 0 &&
2066 (hsotg->hwcfg4 & GHWCFG4_DESC_DMA));
2067 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2068 retval = -EINVAL;
2069 }
2070
2071 hsotg->core_params->dma_desc_enable = val;
2072 return retval;
2073}
2074
2075int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2076 int val)
2077{
2078 int retval = 0;
2079
2080 if (DWC2_PARAM_TEST(val, 0, 1)) {
2081 if (val >= 0) {
2082 dev_err(hsotg->dev,
2083 "Wrong value for host_support_fs_low_power\n");
2084 dev_err(hsotg->dev,
2085 "host_support_fs_low_power must be 0 or 1\n");
2086 }
2087 val = 0;
2088 dev_dbg(hsotg->dev,
2089 "Setting host_support_fs_low_power to %d\n", val);
2090 retval = -EINVAL;
2091 }
2092
2093 hsotg->core_params->host_support_fs_ls_low_power = val;
2094 return retval;
2095}
2096
2097int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2098{
2099 int valid = 1;
2100 int retval = 0;
2101
2102 if (val > 0 && !(hsotg->hwcfg2 & GHWCFG2_DYNAMIC_FIFO))
2103 valid = 0;
2104 if (val < 0)
2105 valid = 0;
2106
2107 if (!valid) {
2108 if (val >= 0)
2109 dev_err(hsotg->dev,
2110 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2111 val);
2112 val = !!(hsotg->hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
2113 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2114 retval = -EINVAL;
2115 }
2116
2117 hsotg->core_params->enable_dynamic_fifo = val;
2118 return retval;
2119}
2120
2121int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2122{
2123 int valid = 1;
2124 int retval = 0;
2125
2126 if (val < 16 || val > readl(hsotg->regs + GRXFSIZ))
2127 valid = 0;
2128
2129 if (!valid) {
2130 if (val >= 0)
2131 dev_err(hsotg->dev,
2132 "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2133 val);
2134 val = readl(hsotg->regs + GRXFSIZ);
2135 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2136 retval = -EINVAL;
2137 }
2138
2139 hsotg->core_params->host_rx_fifo_size = val;
2140 return retval;
2141}
2142
2143int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2144{
2145 int valid = 1;
2146 int retval = 0;
2147
2148 if (val < 16 || val > (readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff))
2149 valid = 0;
2150
2151 if (!valid) {
2152 if (val >= 0)
2153 dev_err(hsotg->dev,
2154 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2155 val);
2156 val = readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
2157 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2158 val);
2159 retval = -EINVAL;
2160 }
2161
2162 hsotg->core_params->host_nperio_tx_fifo_size = val;
2163 return retval;
2164}
2165
2166int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2167{
2168 int valid = 1;
2169 int retval = 0;
2170
2171 if (val < 16 || val > (hsotg->hptxfsiz >> 16))
2172 valid = 0;
2173
2174 if (!valid) {
2175 if (val >= 0)
2176 dev_err(hsotg->dev,
2177 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2178 val);
2179 val = hsotg->hptxfsiz >> 16;
2180 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2181 val);
2182 retval = -EINVAL;
2183 }
2184
2185 hsotg->core_params->host_perio_tx_fifo_size = val;
2186 return retval;
2187}
2188
2189int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2190{
2191 int valid = 1;
2192 int retval = 0;
2193 int width = hsotg->hwcfg3 >> GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT &
2194 GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK >>
2195 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
2196
2197 if (val < 2047 || val >= (1 << (width + 11)))
2198 valid = 0;
2199
2200 if (!valid) {
2201 if (val >= 0)
2202 dev_err(hsotg->dev,
2203 "%d invalid for max_transfer_size. Check HW configuration.\n",
2204 val);
2205 val = (1 << (width + 11)) - 1;
2206 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2207 retval = -EINVAL;
2208 }
2209
2210 hsotg->core_params->max_transfer_size = val;
2211 return retval;
2212}
2213
2214int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2215{
2216 int valid = 1;
2217 int retval = 0;
2218 int width = hsotg->hwcfg3 >> GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT &
2219 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK >>
2220 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
2221
Matthijs Kooijman3b9edf82013-08-30 18:45:12 +02002222 if (val < 15 || val >= (1 << (width + 4)))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002223 valid = 0;
2224
2225 if (!valid) {
2226 if (val >= 0)
2227 dev_err(hsotg->dev,
2228 "%d invalid for max_packet_count. Check HW configuration.\n",
2229 val);
2230 val = (1 << (width + 4)) - 1;
2231 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2232 retval = -EINVAL;
2233 }
2234
2235 hsotg->core_params->max_packet_count = val;
2236 return retval;
2237}
2238
2239int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2240{
2241 int valid = 1;
2242 int retval = 0;
2243 int num_chan = hsotg->hwcfg2 >> GHWCFG2_NUM_HOST_CHAN_SHIFT &
2244 GHWCFG2_NUM_HOST_CHAN_MASK >> GHWCFG2_NUM_HOST_CHAN_SHIFT;
2245
2246 if (val < 1 || val > num_chan + 1)
2247 valid = 0;
2248
2249 if (!valid) {
2250 if (val >= 0)
2251 dev_err(hsotg->dev,
2252 "%d invalid for host_channels. Check HW configuration.\n",
2253 val);
2254 val = num_chan + 1;
2255 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2256 retval = -EINVAL;
2257 }
2258
2259 hsotg->core_params->host_channels = val;
2260 return retval;
2261}
2262
/**
 * dwc2_set_param_phy_type() - Sets the phy_type core parameter
 *
 * @hsotg: Programming view of DWC_otg controller
 * @val: Requested PHY type (DWC2_PHY_TYPE_PARAM_FS/UTMI/ULPI); a negative
 *       value silently selects a hardware-appropriate default
 *
 * Unless NO_FS_PHY_HW_CHECKS is defined, the request is validated against
 * the HS and FS PHY type fields of the cached GHWCFG2 value; an invalid
 * request falls back to the best PHY the hardware reports (UTMI or ULPI
 * if an HS PHY exists, otherwise FS) and returns -EINVAL.
 */
int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
{
#ifndef NO_FS_PHY_HW_CHECKS
	int valid = 0;
	u32 hs_phy_type;
	u32 fs_phy_type;
#endif
	int retval = 0;

	if (DWC2_PARAM_TEST(val, DWC2_PHY_TYPE_PARAM_FS,
			    DWC2_PHY_TYPE_PARAM_ULPI)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for phy_type\n");
			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
		}

#ifndef NO_FS_PHY_HW_CHECKS
		valid = 0;
#else
		/* No HW checks available - just force the FS default */
		val = DWC2_PHY_TYPE_PARAM_FS;
		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
		retval = -EINVAL;
#endif
	}

#ifndef NO_FS_PHY_HW_CHECKS
	hs_phy_type = (hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
		      GHWCFG2_HS_PHY_TYPE_SHIFT;
	fs_phy_type = (hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
		      GHWCFG2_FS_PHY_TYPE_SHIFT;

	/* Accept the request only if the matching PHY is actually present */
	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
		valid = 1;
	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
		valid = 1;
	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		valid = 1;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for phy_type. Check HW configuration.\n",
				val);
		/* Prefer an HS PHY when one exists, otherwise use FS */
		val = DWC2_PHY_TYPE_PARAM_FS;
		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
				val = DWC2_PHY_TYPE_PARAM_UTMI;
			else
				val = DWC2_PHY_TYPE_PARAM_ULPI;
		}
		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
		retval = -EINVAL;
	}
#endif

	hsotg->core_params->phy_type = val;
	return retval;
}
2327
2328static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2329{
2330 return hsotg->core_params->phy_type;
2331}
2332
2333int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2334{
2335 int valid = 1;
2336 int retval = 0;
2337
2338 if (DWC2_PARAM_TEST(val, 0, 1)) {
2339 if (val >= 0) {
2340 dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2341 dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
2342 }
2343 valid = 0;
2344 }
2345
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002346 if (val == DWC2_SPEED_PARAM_HIGH &&
2347 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002348 valid = 0;
2349
2350 if (!valid) {
2351 if (val >= 0)
2352 dev_err(hsotg->dev,
2353 "%d invalid for speed parameter. Check HW configuration.\n",
2354 val);
2355 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002356 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002357 dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2358 retval = -EINVAL;
2359 }
2360
2361 hsotg->core_params->speed = val;
2362 return retval;
2363}
2364
2365int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2366{
2367 int valid = 1;
2368 int retval = 0;
2369
2370 if (DWC2_PARAM_TEST(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2371 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2372 if (val >= 0) {
2373 dev_err(hsotg->dev,
2374 "Wrong value for host_ls_low_power_phy_clk parameter\n");
2375 dev_err(hsotg->dev,
2376 "host_ls_low_power_phy_clk must be 0 or 1\n");
2377 }
2378 valid = 0;
2379 }
2380
2381 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2382 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2383 valid = 0;
2384
2385 if (!valid) {
2386 if (val >= 0)
2387 dev_err(hsotg->dev,
2388 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2389 val);
2390 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2391 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2392 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2393 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2394 val);
2395 retval = -EINVAL;
2396 }
2397
2398 hsotg->core_params->host_ls_low_power_phy_clk = val;
2399 return retval;
2400}
2401
2402int dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2403{
2404 int retval = 0;
2405
2406 if (DWC2_PARAM_TEST(val, 0, 1)) {
2407 if (val >= 0) {
2408 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2409 dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
2410 }
2411 val = 0;
2412 dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
2413 retval = -EINVAL;
2414 }
2415
2416 hsotg->core_params->phy_ulpi_ddr = val;
2417 return retval;
2418}
2419
2420int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2421{
2422 int retval = 0;
2423
2424 if (DWC2_PARAM_TEST(val, 0, 1)) {
2425 if (val >= 0) {
2426 dev_err(hsotg->dev,
2427 "Wrong value for phy_ulpi_ext_vbus\n");
2428 dev_err(hsotg->dev,
2429 "phy_ulpi_ext_vbus must be 0 or 1\n");
2430 }
2431 val = 0;
2432 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2433 retval = -EINVAL;
2434 }
2435
2436 hsotg->core_params->phy_ulpi_ext_vbus = val;
2437 return retval;
2438}
2439
2440int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2441{
2442 int retval = 0;
2443
2444 if (DWC2_PARAM_TEST(val, 8, 8) && DWC2_PARAM_TEST(val, 16, 16)) {
2445 if (val >= 0) {
2446 dev_err(hsotg->dev, "Wrong value for phy_utmi_width\n");
2447 dev_err(hsotg->dev, "phy_utmi_width must be 8 or 16\n");
2448 }
2449 val = 8;
2450 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2451 retval = -EINVAL;
2452 }
2453
2454 hsotg->core_params->phy_utmi_width = val;
2455 return retval;
2456}
2457
2458int dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2459{
2460 int retval = 0;
2461
2462 if (DWC2_PARAM_TEST(val, 0, 1)) {
2463 if (val >= 0) {
2464 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2465 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2466 }
2467 val = 0;
2468 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2469 retval = -EINVAL;
2470 }
2471
2472 hsotg->core_params->ulpi_fs_ls = val;
2473 return retval;
2474}
2475
2476int dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2477{
2478 int retval = 0;
2479
2480 if (DWC2_PARAM_TEST(val, 0, 1)) {
2481 if (val >= 0) {
2482 dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2483 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2484 }
2485 val = 0;
2486 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2487 retval = -EINVAL;
2488 }
2489
2490 hsotg->core_params->ts_dline = val;
2491 return retval;
2492}
2493
/**
 * dwc2_set_param_i2c_enable() - Sets the i2c_enable core parameter
 *
 * @hsotg: Programming view of DWC_otg controller
 * @val: 1 to enable the I2C interface, 0 to disable; negative selects
 *       the default silently
 *
 * Unless NO_FS_PHY_HW_CHECKS is defined, enabling I2C additionally
 * requires the GHWCFG3_I2C capability bit; an unsupported request falls
 * back to the hardware capability and returns -EINVAL.
 */
int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
{
#ifndef NO_FS_PHY_HW_CHECKS
	int valid = 1;
#endif
	int retval = 0;

	if (DWC2_PARAM_TEST(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
		}

#ifndef NO_FS_PHY_HW_CHECKS
		valid = 0;
#else
		/* No HW checks available - just force the default of 0 */
		val = 0;
		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
		retval = -EINVAL;
#endif
	}

#ifndef NO_FS_PHY_HW_CHECKS
	/* I2C can only be enabled when the core actually has the interface */
	if (val == 1 && !(hsotg->hwcfg3 & GHWCFG3_I2C))
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for i2c_enable. Check HW configuration.\n",
				val);
		val = !!(hsotg->hwcfg3 & GHWCFG3_I2C);
		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
		retval = -EINVAL;
	}
#endif

	hsotg->core_params->i2c_enable = val;
	return retval;
}
2534
2535int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2536{
2537 int valid = 1;
2538 int retval = 0;
2539
2540 if (DWC2_PARAM_TEST(val, 0, 1)) {
2541 if (val >= 0) {
2542 dev_err(hsotg->dev,
2543 "Wrong value for en_multiple_tx_fifo,\n");
2544 dev_err(hsotg->dev,
2545 "en_multiple_tx_fifo must be 0 or 1\n");
2546 }
2547 valid = 0;
2548 }
2549
2550 if (val == 1 && !(hsotg->hwcfg4 & GHWCFG4_DED_FIFO_EN))
2551 valid = 0;
2552
2553 if (!valid) {
2554 if (val >= 0)
2555 dev_err(hsotg->dev,
2556 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2557 val);
2558 val = !!(hsotg->hwcfg4 & GHWCFG4_DED_FIFO_EN);
2559 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2560 retval = -EINVAL;
2561 }
2562
2563 hsotg->core_params->en_multiple_tx_fifo = val;
2564 return retval;
2565}
2566
2567int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2568{
2569 int valid = 1;
2570 int retval = 0;
2571
2572 if (DWC2_PARAM_TEST(val, 0, 1)) {
2573 if (val >= 0) {
2574 dev_err(hsotg->dev,
2575 "'%d' invalid for parameter reload_ctl\n", val);
2576 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2577 }
2578 valid = 0;
2579 }
2580
2581 if (val == 1 && hsotg->snpsid < DWC2_CORE_REV_2_92a)
2582 valid = 0;
2583
2584 if (!valid) {
2585 if (val >= 0)
2586 dev_err(hsotg->dev,
2587 "%d invalid for parameter reload_ctl. Check HW configuration.\n",
2588 val);
2589 val = hsotg->snpsid >= DWC2_CORE_REV_2_92a;
2590 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2591 retval = -EINVAL;
2592 }
2593
2594 hsotg->core_params->reload_ctl = val;
2595 return retval;
2596}
2597
Paul Zimmerman4d3190e2013-07-16 12:22:12 -07002598int dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002599{
Paul Zimmerman4d3190e2013-07-16 12:22:12 -07002600 if (val != -1)
2601 hsotg->core_params->ahbcfg = val;
2602 else
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02002603 hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
2604 GAHBCFG_HBSTLEN_SHIFT;
Paul Zimmerman4d3190e2013-07-16 12:22:12 -07002605 return 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002606}
2607
2608int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2609{
2610 int retval = 0;
2611
2612 if (DWC2_PARAM_TEST(val, 0, 1)) {
2613 if (val >= 0) {
2614 dev_err(hsotg->dev,
2615 "'%d' invalid for parameter otg_ver\n", val);
2616 dev_err(hsotg->dev,
2617 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2618 }
2619 val = 0;
2620 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2621 retval = -EINVAL;
2622 }
2623
2624 hsotg->core_params->otg_ver = val;
2625 return retval;
2626}
2627
/*
 * This function is called during module initialization to pass module
 * parameters for the DWC_otg core. It returns non-0 if any parameters
 * are invalid.
 *
 * Note: the individual setters return 0 or -EINVAL, so retval is OR'd
 * together merely to detect "at least one parameter was bad".
 */
int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
			const struct dwc2_core_params *params)
{
	int retval = 0;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	retval |= dwc2_set_param_otg_cap(hsotg, params->otg_cap);
	retval |= dwc2_set_param_dma_enable(hsotg, params->dma_enable);
	retval |= dwc2_set_param_dma_desc_enable(hsotg,
						 params->dma_desc_enable);
	retval |= dwc2_set_param_host_support_fs_ls_low_power(hsotg,
			params->host_support_fs_ls_low_power);
	retval |= dwc2_set_param_enable_dynamic_fifo(hsotg,
			params->enable_dynamic_fifo);
	retval |= dwc2_set_param_host_rx_fifo_size(hsotg,
			params->host_rx_fifo_size);
	retval |= dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
			params->host_nperio_tx_fifo_size);
	retval |= dwc2_set_param_host_perio_tx_fifo_size(hsotg,
			params->host_perio_tx_fifo_size);
	retval |= dwc2_set_param_max_transfer_size(hsotg,
			params->max_transfer_size);
	retval |= dwc2_set_param_max_packet_count(hsotg,
			params->max_packet_count);
	retval |= dwc2_set_param_host_channels(hsotg, params->host_channels);
	retval |= dwc2_set_param_phy_type(hsotg, params->phy_type);
	retval |= dwc2_set_param_speed(hsotg, params->speed);
	retval |= dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
			params->host_ls_low_power_phy_clk);
	retval |= dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
	retval |= dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
			params->phy_ulpi_ext_vbus);
	retval |= dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
	retval |= dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
	retval |= dwc2_set_param_ts_dline(hsotg, params->ts_dline);
	retval |= dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
	retval |= dwc2_set_param_en_multiple_tx_fifo(hsotg,
			params->en_multiple_tx_fifo);
	retval |= dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
	retval |= dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
	retval |= dwc2_set_param_otg_ver(hsotg, params->otg_ver);

	return retval;
}
2677
2678u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
2679{
2680 return (u16)(hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103);
2681}
2682
2683int dwc2_check_core_status(struct dwc2_hsotg *hsotg)
2684{
2685 if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
2686 return -1;
2687 else
2688 return 0;
2689}
2690
2691/**
2692 * dwc2_enable_global_interrupts() - Enables the controller's Global
2693 * Interrupt in the AHB Config register
2694 *
2695 * @hsotg: Programming view of DWC_otg controller
2696 */
2697void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
2698{
2699 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2700
2701 ahbcfg |= GAHBCFG_GLBL_INTR_EN;
2702 writel(ahbcfg, hsotg->regs + GAHBCFG);
2703}
2704
2705/**
2706 * dwc2_disable_global_interrupts() - Disables the controller's Global
2707 * Interrupt in the AHB Config register
2708 *
2709 * @hsotg: Programming view of DWC_otg controller
2710 */
2711void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
2712{
2713 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2714
2715 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
2716 writel(ahbcfg, hsotg->regs + GAHBCFG);
2717}
2718
2719MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
2720MODULE_AUTHOR("Synopsys, Inc.");
2721MODULE_LICENSE("Dual BSD/GPL");