blob: 460aee68a3b0a5bea6abc7af77fe6e0a19dc1c51 [file] [log] [blame]
/*
 * core.c - DesignWare HS OTG Controller common routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
36
37/*
38 * The Core code provides basic services for accessing and managing the
39 * DWC_otg hardware. These services are used by both the Host Controller
40 * Driver and the Peripheral Controller Driver.
41 */
42#include <linux/kernel.h>
43#include <linux/module.h>
44#include <linux/moduleparam.h>
45#include <linux/spinlock.h>
46#include <linux/interrupt.h>
47#include <linux/dma-mapping.h>
48#include <linux/delay.h>
49#include <linux/io.h>
50#include <linux/slab.h>
51#include <linux/usb.h>
52
53#include <linux/usb/hcd.h>
54#include <linux/usb/ch11.h>
55
56#include "core.h"
57#include "hcd.h"
58
59/**
60 * dwc2_enable_common_interrupts() - Initializes the commmon interrupts,
61 * used in both device and host modes
62 *
63 * @hsotg: Programming view of the DWC_otg controller
64 */
65static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
66{
67 u32 intmsk;
68
69 /* Clear any pending OTG Interrupts */
70 writel(0xffffffff, hsotg->regs + GOTGINT);
71
72 /* Clear any pending interrupts */
73 writel(0xffffffff, hsotg->regs + GINTSTS);
74
75 /* Enable the interrupts in the GINTMSK */
76 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
77
78 if (hsotg->core_params->dma_enable <= 0)
79 intmsk |= GINTSTS_RXFLVL;
80
81 intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
82 GINTSTS_SESSREQINT;
83
84 writel(intmsk, hsotg->regs + GINTMSK);
85}
86
87/*
88 * Initializes the FSLSPClkSel field of the HCFG register depending on the
89 * PHY type
90 */
91static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
92{
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -070093 u32 hcfg, val;
94
Matthijs Kooijman9badec22013-08-30 18:45:21 +020095 if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
96 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -070097 hsotg->core_params->ulpi_fs_ls > 0) ||
98 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
99 /* Full speed PHY */
100 val = HCFG_FSLSPCLKSEL_48_MHZ;
101 } else {
102 /* High speed PHY running at full speed or high speed */
103 val = HCFG_FSLSPCLKSEL_30_60_MHZ;
104 }
105
106 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
107 hcfg = readl(hsotg->regs + HCFG);
108 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
Matthijs Kooijmanf9234632013-08-30 18:45:13 +0200109 hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700110 writel(hcfg, hsotg->regs + HCFG);
111}
112
/*
 * Do a soft reset of the core. Be careful with this because it
 * resets all the internal state machines of the core.
 *
 * Returns 0 on success, -EBUSY if the AHB master never reaches the idle
 * state or the soft-reset bit never self-clears (~1-2 s worst case each).
 */
static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Wait for AHB master IDLE state (poll up to 50 times) */
	do {
		usleep_range(20000, 40000);
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (!(greset & GRSTCTL_AHBIDLE));

	/* Core Soft Reset: set CSFTRST and wait for the core to clear it */
	count = 0;
	greset |= GRSTCTL_CSFTRST;
	writel(greset, hsotg->regs + GRSTCTL);
	do {
		usleep_range(20000, 40000);
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (greset & GRSTCTL_CSFTRST);

	/*
	 * NOTE: This long sleep is _very_ important, otherwise the core will
	 * not stay in host mode after a connector ID change!
	 */
	usleep_range(150000, 200000);

	return 0;
}
159
/*
 * dwc2_fs_phy_init() - Select and program the full-speed PHY
 *
 * @hsotg:      Programming view of the DWC_otg controller
 * @select_phy: If true, select the FS PHY in GUSBCFG and soft-reset the
 *              core afterwards (only needed the first time through)
 *
 * Returns 0 on success, or the error from dwc2_core_reset() if the reset
 * after the PHY select fails.
 */
static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, i2cctl;
	int retval = 0;

	/*
	 * core_init() is now called on every switch so only call the
	 * following for the first time through
	 */
	if (select_phy) {
		dev_dbg(hsotg->dev, "FS PHY selected\n");
		usbcfg = readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_PHYSEL;
		writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Reset after a PHY select */
		retval = dwc2_core_reset(hsotg);
		if (retval) {
			dev_err(hsotg->dev, "%s() Reset failed, aborting",
				__func__);
			return retval;
		}
	}

	/*
	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
	 * do this on HNP Dev/Host mode switches (done in dev_init and
	 * host_init).
	 */
	if (dwc2_is_host_mode(hsotg))
		dwc2_init_fs_ls_pclk_sel(hsotg);

	if (hsotg->core_params->i2c_enable > 0) {
		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");

		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
		usbcfg = readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
		writel(usbcfg, hsotg->regs + GUSBCFG);

		/*
		 * Program GI2CCTL.I2CEn: write the device address with
		 * I2CEn cleared first, then set I2CEn in a second write
		 */
		i2cctl = readl(hsotg->regs + GI2CCTL);
		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
		i2cctl &= ~GI2CCTL_I2CEN;
		writel(i2cctl, hsotg->regs + GI2CCTL);
		i2cctl |= GI2CCTL_I2CEN;
		writel(i2cctl, hsotg->regs + GI2CCTL);
	}

	return retval;
}
212
Julien DELACOUbeb7e592013-11-20 17:29:49 +0100213static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700214{
215 u32 usbcfg;
Julien DELACOUbeb7e592013-11-20 17:29:49 +0100216 int retval = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700217
218 if (!select_phy)
Julien DELACOUbeb7e592013-11-20 17:29:49 +0100219 return -ENODEV;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700220
221 usbcfg = readl(hsotg->regs + GUSBCFG);
222
223 /*
224 * HS PHY parameters. These parameters are preserved during soft reset
225 * so only program the first time. Do a soft reset immediately after
226 * setting phyif.
227 */
228 switch (hsotg->core_params->phy_type) {
229 case DWC2_PHY_TYPE_PARAM_ULPI:
230 /* ULPI interface */
231 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
232 usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
233 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
234 if (hsotg->core_params->phy_ulpi_ddr > 0)
235 usbcfg |= GUSBCFG_DDRSEL;
236 break;
237 case DWC2_PHY_TYPE_PARAM_UTMI:
238 /* UTMI+ interface */
239 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
240 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
241 if (hsotg->core_params->phy_utmi_width == 16)
242 usbcfg |= GUSBCFG_PHYIF16;
243 break;
244 default:
245 dev_err(hsotg->dev, "FS PHY selected at HS!\n");
246 break;
247 }
248
249 writel(usbcfg, hsotg->regs + GUSBCFG);
250
251 /* Reset after setting the PHY parameters */
Julien DELACOUbeb7e592013-11-20 17:29:49 +0100252 retval = dwc2_core_reset(hsotg);
253 if (retval) {
254 dev_err(hsotg->dev, "%s() Reset failed, aborting",
255 __func__);
256 return retval;
257 }
258
259 return retval;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700260}
261
Julien DELACOUbeb7e592013-11-20 17:29:49 +0100262static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700263{
Matthijs Kooijman9badec22013-08-30 18:45:21 +0200264 u32 usbcfg;
Julien DELACOUbeb7e592013-11-20 17:29:49 +0100265 int retval = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700266
267 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
268 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
269 /* If FS mode with FS PHY */
Julien DELACOUbeb7e592013-11-20 17:29:49 +0100270 retval = dwc2_fs_phy_init(hsotg, select_phy);
271 if (retval)
272 return retval;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700273 } else {
274 /* High speed PHY */
Julien DELACOUbeb7e592013-11-20 17:29:49 +0100275 retval = dwc2_hs_phy_init(hsotg, select_phy);
276 if (retval)
277 return retval;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700278 }
279
Matthijs Kooijman9badec22013-08-30 18:45:21 +0200280 if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
281 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700282 hsotg->core_params->ulpi_fs_ls > 0) {
283 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
284 usbcfg = readl(hsotg->regs + GUSBCFG);
285 usbcfg |= GUSBCFG_ULPI_FS_LS;
286 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
287 writel(usbcfg, hsotg->regs + GUSBCFG);
288 } else {
289 usbcfg = readl(hsotg->regs + GUSBCFG);
290 usbcfg &= ~GUSBCFG_ULPI_FS_LS;
291 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
292 writel(usbcfg, hsotg->regs + GUSBCFG);
293 }
Julien DELACOUbeb7e592013-11-20 17:29:49 +0100294
295 return retval;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700296}
297
298static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
299{
Paul Zimmerman4d3190e2013-07-16 12:22:12 -0700300 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700301
Matthijs Kooijman9badec22013-08-30 18:45:21 +0200302 switch (hsotg->hw_params.arch) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700303 case GHWCFG2_EXT_DMA_ARCH:
304 dev_err(hsotg->dev, "External DMA Mode not supported\n");
305 return -EINVAL;
306
307 case GHWCFG2_INT_DMA_ARCH:
308 dev_dbg(hsotg->dev, "Internal DMA Mode\n");
Paul Zimmerman4d3190e2013-07-16 12:22:12 -0700309 if (hsotg->core_params->ahbcfg != -1) {
310 ahbcfg &= GAHBCFG_CTRL_MASK;
311 ahbcfg |= hsotg->core_params->ahbcfg &
312 ~GAHBCFG_CTRL_MASK;
313 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700314 break;
315
316 case GHWCFG2_SLAVE_ONLY_ARCH:
317 default:
318 dev_dbg(hsotg->dev, "Slave Only Mode\n");
319 break;
320 }
321
322 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
323 hsotg->core_params->dma_enable,
324 hsotg->core_params->dma_desc_enable);
325
326 if (hsotg->core_params->dma_enable > 0) {
327 if (hsotg->core_params->dma_desc_enable > 0)
328 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
329 else
330 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
331 } else {
332 dev_dbg(hsotg->dev, "Using Slave mode\n");
333 hsotg->core_params->dma_desc_enable = 0;
334 }
335
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700336 if (hsotg->core_params->dma_enable > 0)
337 ahbcfg |= GAHBCFG_DMA_EN;
338
339 writel(ahbcfg, hsotg->regs + GAHBCFG);
340
341 return 0;
342}
343
344static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
345{
346 u32 usbcfg;
347
348 usbcfg = readl(hsotg->regs + GUSBCFG);
349 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
350
Matthijs Kooijman9badec22013-08-30 18:45:21 +0200351 switch (hsotg->hw_params.op_mode) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700352 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
353 if (hsotg->core_params->otg_cap ==
354 DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
355 usbcfg |= GUSBCFG_HNPCAP;
356 if (hsotg->core_params->otg_cap !=
357 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
358 usbcfg |= GUSBCFG_SRPCAP;
359 break;
360
361 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
362 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
363 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
364 if (hsotg->core_params->otg_cap !=
365 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
366 usbcfg |= GUSBCFG_SRPCAP;
367 break;
368
369 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
370 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
371 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
372 default:
373 break;
374 }
375
376 writel(usbcfg, hsotg->regs + GUSBCFG);
377}
378
/**
 * dwc2_core_init() - Initializes the DWC_otg controller registers and
 * prepares the core for device mode or host mode operation
 *
 * @hsotg:      Programming view of the DWC_otg controller
 * @select_phy: If true then also set the Phy type
 * @irq:        If >= 0, the irq to register
 *
 * Sequence: program ULPI VBUS / TS-Dline bits, soft-reset the core,
 * initialize the PHY, program GAHBCFG/GUSBCFG/GOTGCTL, optionally
 * register the shared IRQ handler, enable the common interrupts, and
 * record the initial OTG operating state.
 *
 * Returns 0 on success, or a negative errno if the reset, PHY init,
 * GAHBCFG programming, or IRQ registration fails.
 */
int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
{
	u32 usbcfg, otgctl;
	int retval;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	usbcfg = readl(hsotg->regs + GUSBCFG);

	/* Set ULPI External VBUS bit if needed */
	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
	if (hsotg->core_params->phy_ulpi_ext_vbus ==
			DWC2_PHY_ULPI_EXTERNAL_VBUS)
		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;

	/* Set external TS Dline pulsing bit if needed */
	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
	if (hsotg->core_params->ts_dline > 0)
		usbcfg |= GUSBCFG_TERMSELDLPULSE;

	writel(usbcfg, hsotg->regs + GUSBCFG);

	/* Reset the Controller */
	retval = dwc2_core_reset(hsotg);
	if (retval) {
		dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
			__func__);
		return retval;
	}

	/*
	 * This needs to happen in FS mode before any other programming occurs
	 */
	retval = dwc2_phy_init(hsotg, select_phy);
	if (retval)
		return retval;

	/* Program the GAHBCFG Register */
	retval = dwc2_gahbcfg_init(hsotg);
	if (retval)
		return retval;

	/* Program the GUSBCFG register */
	dwc2_gusbcfg_init(hsotg);

	/* Program the GOTGCTL register (select OTG 2.0 vs 1.3 behavior) */
	otgctl = readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_OTGVER;
	if (hsotg->core_params->otg_ver > 0)
		otgctl |= GOTGCTL_OTGVER;
	writel(otgctl, hsotg->regs + GOTGCTL);
	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);

	/* Clear the SRP success bit for FS-I2c */
	hsotg->srp_success = 0;

	/* irq < 0 means the caller registers the handler itself */
	if (irq >= 0) {
		dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
			irq);
		retval = devm_request_irq(hsotg->dev, irq,
					  dwc2_handle_common_intr, IRQF_SHARED,
					  dev_name(hsotg->dev), hsotg);
		if (retval)
			return retval;
	}

	/* Enable common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/*
	 * Do device or host initialization based on mode during PCD and
	 * HCD initialization
	 */
	if (dwc2_is_host_mode(hsotg)) {
		dev_dbg(hsotg->dev, "Host Mode\n");
		hsotg->op_state = OTG_STATE_A_HOST;
	} else {
		dev_dbg(hsotg->dev, "Device Mode\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
	}

	return 0;
}
470
471/**
472 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
473 *
474 * @hsotg: Programming view of DWC_otg controller
475 */
476void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
477{
478 u32 intmsk;
479
480 dev_dbg(hsotg->dev, "%s()\n", __func__);
481
482 /* Disable all interrupts */
483 writel(0, hsotg->regs + GINTMSK);
484 writel(0, hsotg->regs + HAINTMSK);
485
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700486 /* Enable the common interrupts */
487 dwc2_enable_common_interrupts(hsotg);
488
489 /* Enable host mode interrupts without disturbing common interrupts */
490 intmsk = readl(hsotg->regs + GINTMSK);
491 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
492 writel(intmsk, hsotg->regs + GINTMSK);
493}
494
495/**
496 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
497 *
498 * @hsotg: Programming view of DWC_otg controller
499 */
500void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
501{
502 u32 intmsk = readl(hsotg->regs + GINTMSK);
503
504 /* Disable host mode interrupts without disturbing common interrupts */
505 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
506 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
507 writel(intmsk, hsotg->regs + GINTMSK);
508}
509
/*
 * Program the host-mode FIFO sizes when dynamic FIFO sizing is enabled.
 * Layout in FIFO RAM (word addresses): Rx FIFO at 0, non-periodic Tx FIFO
 * immediately after it, periodic Tx FIFO after that.
 */
static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;

	/* Nothing to do when the hardware defaults are used */
	if (!params->enable_dynamic_fifo)
		return;

	/* Rx FIFO */
	grxfsiz = readl(hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
	grxfsiz |= params->host_rx_fifo_size <<
		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
	writel(grxfsiz, hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));

	/* Non-periodic Tx FIFO: starts right after the Rx FIFO */
	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
		readl(hsotg->regs + GNPTXFSIZ));
	nptxfsiz = params->host_nperio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	nptxfsiz |= params->host_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
		readl(hsotg->regs + GNPTXFSIZ));

	/* Periodic Tx FIFO: starts after Rx + non-periodic Tx */
	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
		readl(hsotg->regs + HPTXFSIZ));
	hptxfsiz = params->host_perio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	hptxfsiz |= (params->host_rx_fifo_size +
		     params->host_nperio_tx_fifo_size) <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
		readl(hsotg->regs + HPTXFSIZ));

	/* Older cores (<= 2.94a) also need GDFIFOCFG's EP info base set */
	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
		/*
		 * Global DFIFOCFG calculation for Host mode -
		 * include RxFIFO, NPTXFIFO and HPTXFIFO
		 */
		dfifocfg = readl(hsotg->regs + GDFIFOCFG);
		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
		dfifocfg |= (params->host_rx_fifo_size +
			     params->host_nperio_tx_fifo_size +
			     params->host_perio_tx_fifo_size) <<
			    GDFIFOCFG_EPINFOBASE_SHIFT &
			    GDFIFOCFG_EPINFOBASE_MASK;
		writel(dfifocfg, hsotg->regs + GDFIFOCFG);
	}
}
566
/**
 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
 * Host mode
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
 * request queues. Host channels are reset to ensure that they are ready for
 * performing transfers.
 */
void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, hfir, otgctl;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Restart the Phy Clock */
	writel(0, hsotg->regs + PCGCTL);

	/* Initialize Host Configuration Register */
	dwc2_init_fs_ls_pclk_sel(hsotg);
	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
		hcfg = readl(hsotg->regs + HCFG);
		hcfg |= HCFG_FSLSSUPP;
		writel(hcfg, hsotg->regs + HCFG);
	}

	/*
	 * This bit allows dynamic reloading of the HFIR register during
	 * runtime. This bit needs to be programmed during initial configuration
	 * and its value must not be changed during runtime.
	 */
	if (hsotg->core_params->reload_ctl > 0) {
		hfir = readl(hsotg->regs + HFIR);
		hfir |= HFIR_RLDCTRL;
		writel(hfir, hsotg->regs + HFIR);
	}

	if (hsotg->core_params->dma_desc_enable > 0) {
		u32 op_mode = hsotg->hw_params.op_mode;
		/*
		 * Descriptor DMA requires core rev >= 2.90a, the hardware
		 * capability bit, and a non-SRP-device op mode; otherwise
		 * fall back to buffer DMA.
		 */
		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
		    !hsotg->hw_params.dma_desc_enable ||
		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
			dev_err(hsotg->dev,
				"Hardware does not support descriptor DMA mode -\n");
			dev_err(hsotg->dev,
				"falling back to buffer DMA mode.\n");
			hsotg->core_params->dma_desc_enable = 0;
		} else {
			hcfg = readl(hsotg->regs + HCFG);
			hcfg |= HCFG_DESCDMA;
			writel(hcfg, hsotg->regs + HCFG);
		}
	}

	/* Configure data FIFO sizes */
	dwc2_config_fifos(hsotg);

	/* TODO - check this */
	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	writel(otgctl, hsotg->regs + GOTGCTL);

	/* Make sure the FIFOs are flushed */
	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
	dwc2_flush_rx_fifo(hsotg);

	/*
	 * Clear Host Set HNP Enable in the OTG Control Register
	 * NOTE(review): duplicates the clear done just before the FIFO
	 * flush above - confirm whether both writes are really needed
	 */
	otgctl = readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	writel(otgctl, hsotg->regs + GOTGCTL);

	if (hsotg->core_params->dma_desc_enable <= 0) {
		int num_channels, i;
		u32 hcchar;

		/* Flush out any leftover queued requests */
		num_channels = hsotg->core_params->host_channels;
		for (i = 0; i < num_channels; i++) {
			hcchar = readl(hsotg->regs + HCCHAR(i));
			hcchar &= ~HCCHAR_CHENA;
			hcchar |= HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			writel(hcchar, hsotg->regs + HCCHAR(i));
		}

		/* Halt all channels to put them into a known state */
		for (i = 0; i < num_channels; i++) {
			int count = 0;

			hcchar = readl(hsotg->regs + HCCHAR(i));
			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			writel(hcchar, hsotg->regs + HCCHAR(i));
			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
				__func__, i);
			/* Poll until the core clears the channel enable */
			do {
				hcchar = readl(hsotg->regs + HCCHAR(i));
				if (++count > 1000) {
					dev_err(hsotg->dev,
						"Unable to clear enable on channel %d\n",
						i);
					break;
				}
				udelay(1);
			} while (hcchar & HCCHAR_CHENA);
		}
	}

	/* Turn on the vbus power */
	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
	if (hsotg->op_state == OTG_STATE_A_HOST) {
		u32 hprt0 = dwc2_read_hprt0(hsotg);

		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
			!!(hprt0 & HPRT0_PWR));
		if (!(hprt0 & HPRT0_PWR)) {
			hprt0 |= HPRT0_PWR;
			writel(hprt0, hsotg->regs + HPRT0);
		}
	}

	dwc2_enable_host_interrupts(hsotg);
}
694
/*
 * Build and program HCINTMSK for a channel operating in slave (non-DMA)
 * mode, based on the endpoint type, direction, split state and error state.
 * CHHLTD is always enabled.
 */
static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, "control/bulk\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_BBLERR;
		} else {
			/* OUT: NAK/NYET flow control; ACK only while pinging */
			hcintmsk |= HCINTMSK_NAK;
			hcintmsk |= HCINTMSK_NYET;
			if (chan->do_ping)
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->do_split) {
			hcintmsk |= HCINTMSK_NAK;
			/* CSPLIT waits on NYET, SSPLIT on ACK */
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}

		/* Watch ACKs to detect recovery from the error state */
		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "intr\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_NAK;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		hcintmsk |= HCINTMSK_FRMOVRUN;

		if (chan->ep_is_in)
			hcintmsk |= HCINTMSK_BBLERR;
		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		if (chan->do_split) {
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "isoc\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_FRMOVRUN;
		hcintmsk |= HCINTMSK_ACK;

		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_XACTERR;
			hcintmsk |= HCINTMSK_BBLERR;
		}
		break;
	default:
		dev_err(hsotg->dev, "## Unknown EP type ##\n");
		break;
	}

	writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}
772
773static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
774 struct dwc2_host_chan *chan)
775{
776 u32 hcintmsk = HCINTMSK_CHHLTD;
777
778 /*
779 * For Descriptor DMA mode core halts the channel on AHB error.
780 * Interrupt is not required.
781 */
782 if (hsotg->core_params->dma_desc_enable <= 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200783 if (dbg_hc(chan))
784 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700785 hcintmsk |= HCINTMSK_AHBERR;
786 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200787 if (dbg_hc(chan))
788 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700789 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
790 hcintmsk |= HCINTMSK_XFERCOMPL;
791 }
792
793 if (chan->error_state && !chan->do_split &&
794 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200795 if (dbg_hc(chan))
796 dev_vdbg(hsotg->dev, "setting ACK\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700797 hcintmsk |= HCINTMSK_ACK;
798 if (chan->ep_is_in) {
799 hcintmsk |= HCINTMSK_DATATGLERR;
800 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
801 hcintmsk |= HCINTMSK_NAK;
802 }
803 }
804
805 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200806 if (dbg_hc(chan))
807 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700808}
809
810static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
811 struct dwc2_host_chan *chan)
812{
813 u32 intmsk;
814
815 if (hsotg->core_params->dma_enable > 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200816 if (dbg_hc(chan))
817 dev_vdbg(hsotg->dev, "DMA enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700818 dwc2_hc_enable_dma_ints(hsotg, chan);
819 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200820 if (dbg_hc(chan))
821 dev_vdbg(hsotg->dev, "DMA disabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700822 dwc2_hc_enable_slave_ints(hsotg, chan);
823 }
824
825 /* Enable the top level host channel interrupt */
826 intmsk = readl(hsotg->regs + HAINTMSK);
827 intmsk |= 1 << chan->hc_num;
828 writel(intmsk, hsotg->regs + HAINTMSK);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200829 if (dbg_hc(chan))
830 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700831
832 /* Make sure host channel interrupts are enabled */
833 intmsk = readl(hsotg->regs + GINTMSK);
834 intmsk |= GINTSTS_HCHINT;
835 writel(intmsk, hsotg->regs + GINTMSK);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +0200836 if (dbg_hc(chan))
837 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -0700838}
839
840/**
841 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
842 * a specific endpoint
843 *
844 * @hsotg: Programming view of DWC_otg controller
845 * @chan: Information needed to initialize the host channel
846 *
847 * The HCCHARn register is set up with the characteristics specified in chan.
848 * Host channel interrupts that may need to be serviced while this transfer is
849 * in progress are enabled.
850 */
void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u8 hc_num = chan->hc_num;
	u32 hcintmsk;
	u32 hcchar;
	u32 hcsplt = 0;	/* stays 0 (SPLIT disabled) unless chan->do_split */

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Clear old interrupt conditions for this host channel */
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	writel(hcintmsk, hsotg->regs + HCINT(hc_num));

	/* Enable channel interrupts required for this transfer */
	dwc2_hc_enable_ints(hsotg, chan);

	/*
	 * Program the HCCHARn register with the endpoint characteristics for
	 * the current transfer. Each field is shifted into place and masked
	 * so an out-of-range value cannot corrupt neighboring fields.
	 */
	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
	if (chan->ep_is_in)
		hcchar |= HCCHAR_EPDIR;
	if (chan->speed == USB_SPEED_LOW)
		hcchar |= HCCHAR_LSPDDEV;
	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
	writel(hcchar, hsotg->regs + HCCHAR(hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
			 hc_num, hcchar);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
			 __func__, hc_num);
		dev_vdbg(hsotg->dev, "	 Dev Addr: %d\n",
			 chan->dev_addr);
		dev_vdbg(hsotg->dev, "	 Ep Num: %d\n",
			 chan->ep_num);
		dev_vdbg(hsotg->dev, "	 Is In: %d\n",
			 chan->ep_is_in);
		dev_vdbg(hsotg->dev, "	 Is Low Speed: %d\n",
			 chan->speed == USB_SPEED_LOW);
		dev_vdbg(hsotg->dev, "	 Ep Type: %d\n",
			 chan->ep_type);
		dev_vdbg(hsotg->dev, "	 Max Pkt: %d\n",
			 chan->max_packet);
	}

	/* Program the HCSPLT register for SPLITs */
	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev,
				 "Programming HC %d with split --> %s\n",
				 hc_num,
				 chan->complete_split ? "CSPLIT" : "SSPLIT");
		if (chan->complete_split)
			hcsplt |= HCSPLT_COMPSPLT;
		/* Transaction position plus the TT's hub address/port */
		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
			  HCSPLT_XACTPOS_MASK;
		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
			  HCSPLT_HUBADDR_MASK;
		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
			  HCSPLT_PRTADDR_MASK;
		if (dbg_hc(chan)) {
			dev_vdbg(hsotg->dev, "	  comp split %d\n",
				 chan->complete_split);
			dev_vdbg(hsotg->dev, "	  xact pos %d\n",
				 chan->xact_pos);
			dev_vdbg(hsotg->dev, "	  hub addr %d\n",
				 chan->hub_addr);
			dev_vdbg(hsotg->dev, "	  hub port %d\n",
				 chan->hub_port);
			dev_vdbg(hsotg->dev, "	  is_in %d\n",
				 chan->ep_is_in);
			dev_vdbg(hsotg->dev, "	  Max Pkt %d\n",
				 chan->max_packet);
			dev_vdbg(hsotg->dev, "	  xferlen %d\n",
				 chan->xfer_len);
		}
	}

	/* Written unconditionally so a previous SPLIT setup is cleared */
	writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
}
937
938/**
939 * dwc2_hc_halt() - Attempts to halt a host channel
940 *
941 * @hsotg: Controller register interface
942 * @chan: Host channel to halt
943 * @halt_status: Reason for halting the channel
944 *
945 * This function should only be called in Slave mode or to abort a transfer in
946 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
947 * controller halts the channel when the transfer is complete or a condition
948 * occurs that requires application intervention.
949 *
950 * In slave mode, checks for a free request queue entry, then sets the Channel
951 * Enable and Channel Disable bits of the Host Channel Characteristics
 * register of the specified channel to initiate the halt. If there is no free
953 * request queue entry, sets only the Channel Disable bit of the HCCHARn
954 * register to flush requests for this channel. In the latter case, sets a
955 * flag to indicate that the host channel needs to be halted when a request
956 * queue slot is open.
957 *
958 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
959 * HCCHARn register. The controller ensures there is space in the request
960 * queue before submitting the halt request.
961 *
962 * Some time may elapse before the core flushes any posted requests for this
963 * host channel and halts. The Channel Halted interrupt handler completes the
964 * deactivation of the host channel.
965 */
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		  enum dwc2_halt_status halt_status)
{
	u32 nptxsts, hptxsts, hcchar;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);
	/* A halt request must always carry a concrete reason */
	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);

	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    halt_status == DWC2_HC_XFER_AHB_ERR) {
		/*
		 * Disable all channel interrupts except Ch Halted. The QTD
		 * and QH state associated with this transfer has been cleared
		 * (in the case of URB_DEQUEUE), so the channel needs to be
		 * shut down carefully to prevent crashes.
		 */
		u32 hcintmsk = HCINTMSK_CHHLTD;

		dev_vdbg(hsotg->dev, "dequeue/error\n");
		writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));

		/*
		 * Make sure no other interrupts besides halt are currently
		 * pending. Handling another interrupt could cause a crash due
		 * to the QTD and QH state.
		 */
		writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));

		/*
		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
		 * even if the channel was already halted for some other
		 * reason
		 */
		chan->halt_status = halt_status;

		hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
		if (!(hcchar & HCCHAR_CHENA)) {
			/*
			 * The channel is either already halted or it hasn't
			 * started yet. In DMA mode, the transfer may halt if
			 * it finishes normally or a condition occurs that
			 * requires driver intervention. Don't want to halt
			 * the channel again. In either Slave or DMA mode,
			 * it's possible that the transfer has been assigned
			 * to a channel, but not started yet when an URB is
			 * dequeued. Don't want to halt a channel that hasn't
			 * started yet.
			 */
			return;
		}
	}
	if (chan->halt_pending) {
		/*
		 * A halt has already been issued for this channel. This might
		 * happen when a transfer is aborted by a higher level in
		 * the stack.
		 */
		dev_vdbg(hsotg->dev,
			 "*** %s: Channel %d, chan->halt_pending already set ***\n",
			 __func__, chan->hc_num);
		return;
	}

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));

	/* No need to set the bit in DDMA for disabling the channel */
	/* TODO check it everywhere channel is disabled */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcchar |= HCCHAR_CHENA;
	} else {
		if (dbg_hc(chan))
			dev_dbg(hsotg->dev, "desc DMA enabled\n");
	}
	/* CHENA + CHDIS together requests the core to halt the channel */
	hcchar |= HCCHAR_CHDIS;

	if (hsotg->core_params->dma_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA not enabled\n");
		hcchar |= HCCHAR_CHENA;

		/* Check for space in the request queue to issue the halt */
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			nptxsts = readl(hsotg->regs + GNPTXSTS);
			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
				/* No queue space: flush only (halt_on_queue) */
				dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		} else {
			if (dbg_perio())
				dev_vdbg(hsotg->dev, "isoc/intr\n");
			hptxsts = readl(hsotg->regs + HPTXSTS);
			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
			    hsotg->queuing_high_bandwidth) {
				if (dbg_perio())
					dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		}
	} else {
		/* In DMA mode the core guarantees queue space for the halt */
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
	}

	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	chan->halt_status = halt_status;

	if (hcchar & HCCHAR_CHENA) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel enabled\n");
		/* Halt issued; Channel Halted IRQ completes the teardown */
		chan->halt_pending = 1;
		chan->halt_on_queue = 0;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Channel disabled\n");
		/* Halt deferred until a request queue slot opens up */
		chan->halt_on_queue = 1;
	}

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 hcchar: 0x%08x\n",
			 hcchar);
		dev_vdbg(hsotg->dev, "	 halt_pending: %d\n",
			 chan->halt_pending);
		dev_vdbg(hsotg->dev, "	 halt_on_queue: %d\n",
			 chan->halt_on_queue);
		dev_vdbg(hsotg->dev, "	 halt_status: %d\n",
			 chan->halt_status);
	}
}
1102
1103/**
1104 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1105 *
1106 * @hsotg: Programming view of DWC_otg controller
1107 * @chan: Identifies the host channel to clean up
1108 *
1109 * This function is normally called after a transfer is done and the host
1110 * channel is being released
1111 */
1112void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1113{
1114 u32 hcintmsk;
1115
1116 chan->xfer_started = 0;
1117
1118 /*
1119 * Clear channel interrupt enables and any unhandled channel interrupt
1120 * conditions
1121 */
1122 writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1123 hcintmsk = 0xffffffff;
1124 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1125 writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1126}
1127
1128/**
1129 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1130 * which frame a periodic transfer should occur
1131 *
1132 * @hsotg: Programming view of DWC_otg controller
1133 * @chan: Identifies the host channel to set up and its properties
1134 * @hcchar: Current value of the HCCHAR register for the specified host channel
1135 *
1136 * This function has no effect on non-periodic transfers
1137 */
1138static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1139 struct dwc2_host_chan *chan, u32 *hcchar)
1140{
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001141 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1142 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001143 /* 1 if _next_ frame is odd, 0 if it's even */
Paul Zimmerman81a58952013-06-24 11:34:23 -07001144 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001145 *hcchar |= HCCHAR_ODDFRM;
1146 }
1147}
1148
1149static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1150{
1151 /* Set up the initial PID for the transfer */
1152 if (chan->speed == USB_SPEED_HIGH) {
1153 if (chan->ep_is_in) {
1154 if (chan->multi_count == 1)
1155 chan->data_pid_start = DWC2_HC_PID_DATA0;
1156 else if (chan->multi_count == 2)
1157 chan->data_pid_start = DWC2_HC_PID_DATA1;
1158 else
1159 chan->data_pid_start = DWC2_HC_PID_DATA2;
1160 } else {
1161 if (chan->multi_count == 1)
1162 chan->data_pid_start = DWC2_HC_PID_DATA0;
1163 else
1164 chan->data_pid_start = DWC2_HC_PID_MDATA;
1165 }
1166 } else {
1167 chan->data_pid_start = DWC2_HC_PID_DATA0;
1168 }
1169}
1170
1171/**
1172 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1173 * the Host Channel
1174 *
1175 * @hsotg: Programming view of DWC_otg controller
1176 * @chan: Information needed to initialize the host channel
1177 *
1178 * This function should only be called in Slave mode. For a channel associated
1179 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1180 * associated with a periodic EP, the periodic Tx FIFO is written.
1181 *
1182 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1183 * the number of bytes written to the Tx FIFO.
1184 */
1185static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1186 struct dwc2_host_chan *chan)
1187{
1188 u32 i;
1189 u32 remaining_count;
1190 u32 byte_count;
1191 u32 dword_count;
1192 u32 __iomem *data_fifo;
1193 u32 *data_buf = (u32 *)chan->xfer_buf;
1194
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001195 if (dbg_hc(chan))
1196 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001197
1198 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1199
1200 remaining_count = chan->xfer_len - chan->xfer_count;
1201 if (remaining_count > chan->max_packet)
1202 byte_count = chan->max_packet;
1203 else
1204 byte_count = remaining_count;
1205
1206 dword_count = (byte_count + 3) / 4;
1207
1208 if (((unsigned long)data_buf & 0x3) == 0) {
1209 /* xfer_buf is DWORD aligned */
1210 for (i = 0; i < dword_count; i++, data_buf++)
1211 writel(*data_buf, data_fifo);
1212 } else {
1213 /* xfer_buf is not DWORD aligned */
1214 for (i = 0; i < dword_count; i++, data_buf++) {
1215 u32 data = data_buf[0] | data_buf[1] << 8 |
1216 data_buf[2] << 16 | data_buf[3] << 24;
1217 writel(data, data_fifo);
1218 }
1219 }
1220
1221 chan->xfer_count += byte_count;
1222 chan->xfer_buf += byte_count;
1223}
1224
1225/**
1226 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1227 * channel and starts the transfer
1228 *
1229 * @hsotg: Programming view of DWC_otg controller
1230 * @chan: Information needed to initialize the host channel. The xfer_len value
1231 * may be reduced to accommodate the max widths of the XferSize and
1232 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1233 * changed to reflect the final xfer_len value.
1234 *
1235 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1236 * the caller must ensure that there is sufficient space in the request queue
1237 * and Tx Data FIFO.
1238 *
1239 * For an OUT transfer in Slave mode, it loads a data packet into the
1240 * appropriate FIFO. If necessary, additional data packets are loaded in the
1241 * Host ISR.
1242 *
1243 * For an IN transfer in Slave mode, a data packet is requested. The data
1244 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1245 * additional data packets are requested in the Host ISR.
1246 *
1247 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1248 * register along with a packet count of 1 and the channel is enabled. This
1249 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1250 * simply set to 0 since no data transfer occurs in this case.
1251 *
1252 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1253 * all the information required to perform the subsequent data transfer. In
1254 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1255 * controller performs the entire PING protocol, then starts the data
1256 * transfer.
1257 */
void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
			    struct dwc2_host_chan *chan)
{
	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
	u32 hcchar;
	u32 hctsiz = 0;
	u16 num_packets;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (chan->do_ping) {
		if (hsotg->core_params->dma_enable <= 0) {
			/* Slave mode: PING is a separate single transaction */
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, no DMA\n");
			dwc2_hc_do_ping(hsotg, chan);
			chan->xfer_started = 1;
			return;
		} else {
			/* DMA mode: core runs PING then the data transfer */
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, DMA\n");
			hctsiz |= TSIZ_DOPNG;
		}
	}

	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "split\n");
		/* A split carries at most one packet per channel enable */
		num_packets = 1;

		if (chan->complete_split && !chan->ep_is_in)
			/*
			 * For CSPLIT OUT Transfer, set the size to 0 so the
			 * core doesn't expect any data written to the FIFO
			 */
			chan->xfer_len = 0;
		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
			chan->xfer_len = chan->max_packet;
		else if (!chan->ep_is_in && chan->xfer_len > 188)
			/*
			 * NOTE(review): 188 appears to be the per-microframe
			 * byte budget for a start-split OUT -- confirm
			 * against the USB 2.0 split-transaction rules
			 */
			chan->xfer_len = 188;

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "no split\n");
		/*
		 * Ensure that the transfer length and packet count will fit
		 * in the widths allocated for them in the HCTSIZn register
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * Make sure the transfer size is no larger than one
			 * (micro)frame's worth of data. (A check was done
			 * when the periodic transfer was accepted to ensure
			 * that a (micro)frame's worth of data can be
			 * programmed into a channel.)
			 */
			u32 max_periodic_len =
				chan->multi_count * chan->max_packet;

			if (chan->xfer_len > max_periodic_len)
				chan->xfer_len = max_periodic_len;
		} else if (chan->xfer_len > max_hc_xfer_size) {
			/*
			 * Make sure that xfer_len is a multiple of max packet
			 * size
			 */
			chan->xfer_len =
				max_hc_xfer_size - chan->max_packet + 1;
		}

		if (chan->xfer_len > 0) {
			num_packets = (chan->xfer_len + chan->max_packet - 1) /
					chan->max_packet;
			if (num_packets > max_hc_pkt_count) {
				/* Clamp to the PktCnt field width */
				num_packets = max_hc_pkt_count;
				chan->xfer_len = num_packets * chan->max_packet;
			}
		} else {
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;
		}

		if (chan->ep_is_in)
			/*
			 * Always program an integral # of max packets for IN
			 * transfers
			 */
			chan->xfer_len = num_packets * chan->max_packet;

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Make sure that the multi_count field matches the
			 * actual transfer length
			 */
			chan->multi_count = num_packets;

		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			dwc2_set_pid_isoc(chan);

		hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
			  TSIZ_XFERSIZE_MASK;
	}

	chan->start_pkt_count = num_packets;
	hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;
	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
			 hctsiz, chan->hc_num);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 Xfer Size: %d\n",
			 (hctsiz & TSIZ_XFERSIZE_MASK) >>
			 TSIZ_XFERSIZE_SHIFT);
		dev_vdbg(hsotg->dev, "	 Num Pkts: %d\n",
			 (hctsiz & TSIZ_PKTCNT_MASK) >>
			 TSIZ_PKTCNT_SHIFT);
		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
			 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
			 TSIZ_SC_MC_PID_SHIFT);
	}

	if (hsotg->core_params->dma_enable > 0) {
		dma_addr_t dma_addr;

		/* align_buf, when set, is a bounce buffer for this transfer */
		if (chan->align_buf) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "align_buf\n");
			dma_addr = chan->align_buf;
		} else {
			dma_addr = chan->xfer_dma;
		}
		writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
				 (unsigned long)dma_addr, chan->hc_num);
	}

	/* Start the split */
	if (chan->do_split) {
		u32 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));

		hcsplt |= HCSPLT_SPLTENA;
		writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
	}

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;
	dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);

	/* CHDIS should never be set when (re)starting a transfer */
	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;

	if (hsotg->core_params->dma_enable <= 0 &&
	    !chan->ep_is_in && chan->xfer_len > 0)
		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
}
1445
1446/**
1447 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1448 * host channel and starts the transfer in Descriptor DMA mode
1449 *
1450 * @hsotg: Programming view of DWC_otg controller
1451 * @chan: Information needed to initialize the host channel
1452 *
1453 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1454 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1455 * with micro-frame bitmap.
1456 *
1457 * Initializes HCDMA register with descriptor list address and CTD value then
1458 * starts the transfer via enabling the channel.
1459 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hc_dma;
	u32 hctsiz = 0;

	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* 0 - 1 descriptor, 1 - 2 descriptors, etc */
	hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;

	/* Non-zero only for high-speed interrupt endpoints */
	hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;

	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
		dev_vdbg(hsotg->dev, "	 Start PID: %d\n",
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, "	 NTD: %d\n", chan->ntd - 1);
	}

	writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));

	/* Descriptor list base address goes into HCDMA */
	hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;

	/* Always start from first descriptor */
	hc_dma &= ~HCDMA_CTD_MASK;
	writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
			 hc_dma, chan->hc_num);

	hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
	hcchar &= ~HCCHAR_MULTICNT_MASK;
	hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
		  HCCHAR_MULTICNT_MASK;

	/* CHDIS should never be set when (re)starting a transfer */
	if (hcchar & HCCHAR_CHDIS)
		dev_warn(hsotg->dev,
			 "%s: chdis set, channel %d, hcchar 0x%08x\n",
			 __func__, chan->hc_num, hcchar);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "	 Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}
1529
1530/**
1531 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1532 * a previous call to dwc2_hc_start_transfer()
1533 *
1534 * @hsotg: Programming view of DWC_otg controller
1535 * @chan: Information needed to initialize the host channel
1536 *
1537 * The caller must ensure there is sufficient space in the request queue and Tx
1538 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1539 * the controller acts autonomously to complete transfers programmed to a host
1540 * channel.
1541 *
1542 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1543 * if there is any data remaining to be queued. For an IN transfer, another
1544 * data packet is always requested. For the SETUP phase of a control transfer,
1545 * this function does nothing.
1546 *
1547 * Return: 1 if a new request is queued, 0 if no more requests are required
1548 * for this transfer
1549 */
int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen. So we can't depend on the NAK interrupt
		 * handler to requeue a NAK'd request. Instead, IN requests
		 * are issued each time this function is called. When the
		 * transfer completes, the extra requests for the channel will
		 * be flushed.
		 */
		u32 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));

		dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
		hcchar |= HCCHAR_CHENA;
		hcchar &= ~HCCHAR_CHDIS;
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "	 IN xfer: hcchar = 0x%08x\n",
				 hcchar);
		writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
		chan->requests++;
		return 1;
	}

	/* OUT transfers */

	if (chan->xfer_count < chan->xfer_len) {
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			u32 hcchar = readl(hsotg->regs +
					   HCCHAR(chan->hc_num));

			/*
			 * NOTE(review): the updated hcchar is never written
			 * back to the register here, so the odd-frame bit
			 * computed below has no effect on this path --
			 * confirm whether a writel was intended
			 */
			dwc2_hc_set_even_odd_frame(hsotg, chan,
						   &hcchar);
		}

		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
		chan->requests++;
		return 1;
	}

	/* OUT transfer fully queued; no further requests needed */
	return 0;
}
1611
1612/**
1613 * dwc2_hc_do_ping() - Starts a PING transfer
1614 *
1615 * @hsotg: Programming view of DWC_otg controller
1616 * @chan: Information needed to initialize the host channel
1617 *
1618 * This function should only be called in Slave mode. The Do Ping bit is set in
1619 * the HCTSIZ register, then the channel is enabled.
1620 */
1621void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1622{
1623 u32 hcchar;
1624 u32 hctsiz;
1625
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001626 if (dbg_hc(chan))
1627 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1628 chan->hc_num);
1629
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001630
1631 hctsiz = TSIZ_DOPNG;
1632 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1633 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1634
1635 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1636 hcchar |= HCCHAR_CHENA;
1637 hcchar &= ~HCCHAR_CHDIS;
1638 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1639}
1640
1641/**
1642 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
1643 * the HFIR register according to PHY type and speed
1644 *
1645 * @hsotg: Programming view of DWC_otg controller
1646 *
1647 * NOTE: The caller can modify the value of the HFIR register only after the
1648 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
1649 * has been set
1650 */
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;
	u32 hprt0;
	int clock = 60;	/* default value: PHY clock assumed 60 MHz */

	usbcfg = readl(hsotg->regs + GUSBCFG);
	hprt0 = readl(hsotg->regs + HPRT0);

	/*
	 * Derive the PHY clock rate (MHz) from the GUSBCFG PHY-interface
	 * bits and the FS PHY type in the hardware config. The checks
	 * below are not mutually exclusive in form; a later match would
	 * override an earlier one, so keep them in this order.
	 */
	/* ULPI interface, 8-bit */
	if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
	    !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	/* FS PHY sharing the ULPI interface */
	if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
	    GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
		clock = 48;
	/* UTMI+ 16-bit, full-speed clock select off */
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 30;
	/* UTMI+ 8-bit, full-speed clock select off */
	if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
		clock = 60;
	/* UTMI+ 16-bit with low-power clock select */
	if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
	    !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
		clock = 48;
	/* FS PHY sharing the UTMI+ interface, 8-bit */
	if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
		clock = 48;
	/* Dedicated full-speed PHY */
	if ((usbcfg & GUSBCFG_PHYSEL) &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		clock = 48;

	if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
		/* High speed case: 125 us (micro)frame, in PHY clocks */
		return 125 * clock;
	else
		/* FS/LS case: 1 ms frame, in PHY clocks */
		return 1000 * clock;
}
1689
/**
 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
 * buffer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @dest:  Destination buffer for the packet
 * @bytes: Number of bytes to copy to the destination
 */
void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
{
	/* All channel FIFOs map to the same Rx queue; slot 0 is used here */
	u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
	u32 *data_buf = (u32 *)dest;
	/* Round up so a partial trailing word is still popped from the FIFO */
	int word_count = (bytes + 3) / 4;
	int i;

	/*
	 * Todo: Account for the case where dest is not dword aligned. This
	 * requires reading data from the FIFO into a u32 temp buffer, then
	 * moving it into the data buffer.
	 */

	dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);

	/* Each readl pops one 32-bit word; the FIFO address never advances */
	for (i = 0; i < word_count; i++, data_buf++)
		*data_buf = readl(fifo);
}
1716
1717/**
1718 * dwc2_dump_host_registers() - Prints the host registers
1719 *
1720 * @hsotg: Programming view of DWC_otg controller
1721 *
1722 * NOTE: This function will be removed once the peripheral controller code
1723 * is integrated and the driver is stable
1724 */
void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
{
/* Entire body is debug-only; compiles to an empty function otherwise */
#ifdef DEBUG
	u32 __iomem *addr;
	int i;

	dev_dbg(hsotg->dev, "Host Global Registers\n");
	addr = hsotg->regs + HCFG;
	dev_dbg(hsotg->dev, "HCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HFIR;
	dev_dbg(hsotg->dev, "HFIR	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HFNUM;
	dev_dbg(hsotg->dev, "HFNUM	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HPTXSTS;
	dev_dbg(hsotg->dev, "HPTXSTS	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HAINT;
	dev_dbg(hsotg->dev, "HAINT	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HAINTMSK;
	dev_dbg(hsotg->dev, "HAINTMSK	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	/* HFLBADDR only exists when descriptor DMA is in use */
	if (hsotg->core_params->dma_desc_enable > 0) {
		addr = hsotg->regs + HFLBADDR;
		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
	}

	addr = hsotg->regs + HPRT0;
	dev_dbg(hsotg->dev, "HPRT0	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));

	/* Per-channel register banks, one per configured host channel */
	for (i = 0; i < hsotg->core_params->host_channels; i++) {
		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
		addr = hsotg->regs + HCCHAR(i);
		dev_dbg(hsotg->dev, "HCCHAR	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCSPLT(i);
		dev_dbg(hsotg->dev, "HCSPLT	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCINT(i);
		dev_dbg(hsotg->dev, "HCINT	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCINTMSK(i);
		dev_dbg(hsotg->dev, "HCINTMSK	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCTSIZ(i);
		dev_dbg(hsotg->dev, "HCTSIZ	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		addr = hsotg->regs + HCDMA(i);
		dev_dbg(hsotg->dev, "HCDMA	 @0x%08lX : 0x%08X\n",
			(unsigned long)addr, readl(addr));
		if (hsotg->core_params->dma_desc_enable > 0) {
			addr = hsotg->regs + HCDMAB(i);
			dev_dbg(hsotg->dev, "HCDMAB	 @0x%08lX : 0x%08X\n",
				(unsigned long)addr, readl(addr));
		}
	}
#endif
}
1788
1789/**
1790 * dwc2_dump_global_registers() - Prints the core global registers
1791 *
1792 * @hsotg: Programming view of DWC_otg controller
1793 *
1794 * NOTE: This function will be removed once the peripheral controller code
1795 * is integrated and the driver is stable
1796 */
void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
{
/* Entire body is debug-only; compiles to an empty function otherwise */
#ifdef DEBUG
	u32 __iomem *addr;

	dev_dbg(hsotg->dev, "Core Global Registers\n");
	addr = hsotg->regs + GOTGCTL;
	dev_dbg(hsotg->dev, "GOTGCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GOTGINT;
	dev_dbg(hsotg->dev, "GOTGINT	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GAHBCFG;
	dev_dbg(hsotg->dev, "GAHBCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GUSBCFG;
	dev_dbg(hsotg->dev, "GUSBCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GRSTCTL;
	dev_dbg(hsotg->dev, "GRSTCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GINTSTS;
	dev_dbg(hsotg->dev, "GINTSTS	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GINTMSK;
	dev_dbg(hsotg->dev, "GINTMSK	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GRXSTSR;
	dev_dbg(hsotg->dev, "GRXSTSR	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GRXFSIZ;
	dev_dbg(hsotg->dev, "GRXFSIZ	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GNPTXFSIZ;
	dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GNPTXSTS;
	dev_dbg(hsotg->dev, "GNPTXSTS	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GI2CCTL;
	dev_dbg(hsotg->dev, "GI2CCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GPVNDCTL;
	dev_dbg(hsotg->dev, "GPVNDCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GGPIO;
	dev_dbg(hsotg->dev, "GGPIO	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GUID;
	dev_dbg(hsotg->dev, "GUID	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GSNPSID;
	dev_dbg(hsotg->dev, "GSNPSID	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG1;
	dev_dbg(hsotg->dev, "GHWCFG1	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG2;
	dev_dbg(hsotg->dev, "GHWCFG2	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG3;
	dev_dbg(hsotg->dev, "GHWCFG3	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GHWCFG4;
	dev_dbg(hsotg->dev, "GHWCFG4	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GLPMCFG;
	dev_dbg(hsotg->dev, "GLPMCFG	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GPWRDN;
	dev_dbg(hsotg->dev, "GPWRDN	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + GDFIFOCFG;
	dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
	addr = hsotg->regs + HPTXFSIZ;
	dev_dbg(hsotg->dev, "HPTXFSIZ	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));

	/* Power and clock gating control lives outside the global block */
	addr = hsotg->regs + PCGCTL;
	dev_dbg(hsotg->dev, "PCGCTL	 @0x%08lX : 0x%08X\n",
		(unsigned long)addr, readl(addr));
#endif
}
1881
1882/**
1883 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
1884 *
1885 * @hsotg: Programming view of DWC_otg controller
1886 * @num: Tx FIFO to flush
1887 */
void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);

	/* Select the FIFO to flush and trigger the flush */
	greset = GRSTCTL_TXFFLSH;
	greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
	writel(greset, hsotg->regs + GRSTCTL);

	/*
	 * Poll until the core clears TXFFLSH; bail out after 10000
	 * iterations (~10 ms of udelay(1) waits) to avoid hanging.
	 */
	do {
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 10000) {
			dev_warn(hsotg->dev,
				 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
				 __func__, greset,
				 readl(hsotg->regs + GNPTXSTS));
			break;
		}
		udelay(1);
	} while (greset & GRSTCTL_TXFFLSH);

	/* Wait for at least 3 PHY Clocks */
	udelay(1);
}
1914
1915/**
1916 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
1917 *
1918 * @hsotg: Programming view of DWC_otg controller
1919 */
void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Trigger the Rx FIFO flush */
	greset = GRSTCTL_RXFFLSH;
	writel(greset, hsotg->regs + GRSTCTL);

	/*
	 * Poll until the core clears RXFFLSH; bail out after 10000
	 * iterations (~10 ms of udelay(1) waits) to avoid hanging.
	 */
	do {
		greset = readl(hsotg->regs + GRSTCTL);
		if (++count > 10000) {
			dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
				 __func__, greset);
			break;
		}
		udelay(1);
	} while (greset & GRSTCTL_RXFFLSH);

	/* Wait for at least 3 PHY Clocks */
	udelay(1);
}
1943
/* True when @a lies outside the inclusive range [@b, @c] */
#define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001945
1946/* Parameter access functions */
/*
 * Validates the requested OTG capability against the synthesized operating
 * mode (GHWCFG2). On an invalid request, falls back to the most capable
 * setting the hardware supports and returns -EINVAL; a negative @val is
 * treated as "not set" and selects the fallback silently.
 */
int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;
	int retval = 0;

	switch (val) {
	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
		/* HNP+SRP needs the full HNP/SRP-capable core */
		if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
			valid = 0;
		break;
	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
		/* SRP-only works on any SRP-capable operating mode */
		switch (hsotg->hw_params.op_mode) {
		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
			break;
		default:
			valid = 0;
			break;
		}
		break;
	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
		/* always valid */
		break;
	default:
		valid = 0;
		break;
	}

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for otg_cap parameter. Check HW configuration.\n",
				val);
		/* Fall back to the best capability the core supports */
		switch (hsotg->hw_params.op_mode) {
		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
			break;
		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
			break;
		default:
			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
			break;
		}
		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
		retval = -EINVAL;
	}

	hsotg->core_params->otg_cap = val;
	return retval;
}
2002
2003int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
2004{
2005 int valid = 1;
2006 int retval = 0;
2007
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002008 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002009 valid = 0;
2010 if (val < 0)
2011 valid = 0;
2012
2013 if (!valid) {
2014 if (val >= 0)
2015 dev_err(hsotg->dev,
2016 "%d invalid for dma_enable parameter. Check HW configuration.\n",
2017 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002018 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002019 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
2020 retval = -EINVAL;
2021 }
2022
2023 hsotg->core_params->dma_enable = val;
2024 return retval;
2025}
2026
2027int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
2028{
2029 int valid = 1;
2030 int retval = 0;
2031
2032 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002033 !hsotg->hw_params.dma_desc_enable))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002034 valid = 0;
2035 if (val < 0)
2036 valid = 0;
2037
2038 if (!valid) {
2039 if (val >= 0)
2040 dev_err(hsotg->dev,
2041 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2042 val);
2043 val = (hsotg->core_params->dma_enable > 0 &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002044 hsotg->hw_params.dma_desc_enable);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002045 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2046 retval = -EINVAL;
2047 }
2048
2049 hsotg->core_params->dma_desc_enable = val;
2050 return retval;
2051}
2052
2053int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2054 int val)
2055{
2056 int retval = 0;
2057
Paul Zimmerman498f0662013-11-22 16:43:47 -08002058 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002059 if (val >= 0) {
2060 dev_err(hsotg->dev,
2061 "Wrong value for host_support_fs_low_power\n");
2062 dev_err(hsotg->dev,
2063 "host_support_fs_low_power must be 0 or 1\n");
2064 }
2065 val = 0;
2066 dev_dbg(hsotg->dev,
2067 "Setting host_support_fs_low_power to %d\n", val);
2068 retval = -EINVAL;
2069 }
2070
2071 hsotg->core_params->host_support_fs_ls_low_power = val;
2072 return retval;
2073}
2074
2075int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2076{
2077 int valid = 1;
2078 int retval = 0;
2079
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002080 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002081 valid = 0;
2082 if (val < 0)
2083 valid = 0;
2084
2085 if (!valid) {
2086 if (val >= 0)
2087 dev_err(hsotg->dev,
2088 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2089 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002090 val = hsotg->hw_params.enable_dynamic_fifo;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002091 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2092 retval = -EINVAL;
2093 }
2094
2095 hsotg->core_params->enable_dynamic_fifo = val;
2096 return retval;
2097}
2098
2099int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2100{
2101 int valid = 1;
2102 int retval = 0;
2103
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002104 if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002105 valid = 0;
2106
2107 if (!valid) {
2108 if (val >= 0)
2109 dev_err(hsotg->dev,
2110 "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2111 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002112 val = hsotg->hw_params.host_rx_fifo_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002113 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2114 retval = -EINVAL;
2115 }
2116
2117 hsotg->core_params->host_rx_fifo_size = val;
2118 return retval;
2119}
2120
2121int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2122{
2123 int valid = 1;
2124 int retval = 0;
2125
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002126 if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002127 valid = 0;
2128
2129 if (!valid) {
2130 if (val >= 0)
2131 dev_err(hsotg->dev,
2132 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2133 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002134 val = hsotg->hw_params.host_nperio_tx_fifo_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002135 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2136 val);
2137 retval = -EINVAL;
2138 }
2139
2140 hsotg->core_params->host_nperio_tx_fifo_size = val;
2141 return retval;
2142}
2143
2144int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2145{
2146 int valid = 1;
2147 int retval = 0;
2148
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002149 if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002150 valid = 0;
2151
2152 if (!valid) {
2153 if (val >= 0)
2154 dev_err(hsotg->dev,
2155 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2156 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002157 val = hsotg->hw_params.host_perio_tx_fifo_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002158 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2159 val);
2160 retval = -EINVAL;
2161 }
2162
2163 hsotg->core_params->host_perio_tx_fifo_size = val;
2164 return retval;
2165}
2166
2167int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2168{
2169 int valid = 1;
2170 int retval = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002171
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002172 if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002173 valid = 0;
2174
2175 if (!valid) {
2176 if (val >= 0)
2177 dev_err(hsotg->dev,
2178 "%d invalid for max_transfer_size. Check HW configuration.\n",
2179 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002180 val = hsotg->hw_params.max_transfer_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002181 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2182 retval = -EINVAL;
2183 }
2184
2185 hsotg->core_params->max_transfer_size = val;
2186 return retval;
2187}
2188
2189int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2190{
2191 int valid = 1;
2192 int retval = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002193
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002194 if (val < 15 || val > hsotg->hw_params.max_packet_count)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002195 valid = 0;
2196
2197 if (!valid) {
2198 if (val >= 0)
2199 dev_err(hsotg->dev,
2200 "%d invalid for max_packet_count. Check HW configuration.\n",
2201 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002202 val = hsotg->hw_params.max_packet_count;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002203 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2204 retval = -EINVAL;
2205 }
2206
2207 hsotg->core_params->max_packet_count = val;
2208 return retval;
2209}
2210
2211int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2212{
2213 int valid = 1;
2214 int retval = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002215
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002216 if (val < 1 || val > hsotg->hw_params.host_channels)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002217 valid = 0;
2218
2219 if (!valid) {
2220 if (val >= 0)
2221 dev_err(hsotg->dev,
2222 "%d invalid for host_channels. Check HW configuration.\n",
2223 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002224 val = hsotg->hw_params.host_channels;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002225 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2226 retval = -EINVAL;
2227 }
2228
2229 hsotg->core_params->host_channels = val;
2230 return retval;
2231}
2232
/*
 * Validates and stores the phy_type core parameter (FS/UTMI/ULPI).
 * Unless NO_FS_PHY_HW_CHECKS is defined, the choice is checked against
 * the synthesized HS and FS PHY types and a supported type is chosen as
 * fallback on mismatch (-EINVAL returned). A negative @val means "not
 * set" and selects the fallback without an error message.
 */
int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
{
#ifndef NO_FS_PHY_HW_CHECKS
	int valid = 0;
	u32 hs_phy_type, fs_phy_type;
#endif
	int retval = 0;

	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
			       DWC2_PHY_TYPE_PARAM_ULPI)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for phy_type\n");
			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
		}

#ifndef NO_FS_PHY_HW_CHECKS
		valid = 0;
#else
		/* No HW checks available: just force the FS PHY type */
		val = DWC2_PHY_TYPE_PARAM_FS;
		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
		retval = -EINVAL;
#endif
	}

#ifndef NO_FS_PHY_HW_CHECKS
	/* Check the requested type against what the core supports */
	hs_phy_type = hsotg->hw_params.hs_phy_type;
	fs_phy_type = hsotg->hw_params.fs_phy_type;
	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
		valid = 1;
	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
		valid = 1;
	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		valid = 1;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for phy_type. Check HW configuration.\n",
				val);
		/* Prefer a HS PHY type when one is supported, else FS */
		val = DWC2_PHY_TYPE_PARAM_FS;
		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
				val = DWC2_PHY_TYPE_PARAM_UTMI;
			else
				val = DWC2_PHY_TYPE_PARAM_ULPI;
		}
		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
		retval = -EINVAL;
	}
#endif

	hsotg->core_params->phy_type = val;
	return retval;
}
2293
/* Returns the currently configured phy_type core parameter */
static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
{
	return hsotg->core_params->phy_type;
}
2298
2299int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2300{
2301 int valid = 1;
2302 int retval = 0;
2303
Paul Zimmerman498f0662013-11-22 16:43:47 -08002304 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002305 if (val >= 0) {
2306 dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2307 dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
2308 }
2309 valid = 0;
2310 }
2311
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002312 if (val == DWC2_SPEED_PARAM_HIGH &&
2313 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002314 valid = 0;
2315
2316 if (!valid) {
2317 if (val >= 0)
2318 dev_err(hsotg->dev,
2319 "%d invalid for speed parameter. Check HW configuration.\n",
2320 val);
2321 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002322 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002323 dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2324 retval = -EINVAL;
2325 }
2326
2327 hsotg->core_params->speed = val;
2328 return retval;
2329}
2330
2331int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2332{
2333 int valid = 1;
2334 int retval = 0;
2335
Paul Zimmerman498f0662013-11-22 16:43:47 -08002336 if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2337 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002338 if (val >= 0) {
2339 dev_err(hsotg->dev,
2340 "Wrong value for host_ls_low_power_phy_clk parameter\n");
2341 dev_err(hsotg->dev,
2342 "host_ls_low_power_phy_clk must be 0 or 1\n");
2343 }
2344 valid = 0;
2345 }
2346
2347 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2348 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2349 valid = 0;
2350
2351 if (!valid) {
2352 if (val >= 0)
2353 dev_err(hsotg->dev,
2354 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2355 val);
2356 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2357 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2358 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2359 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2360 val);
2361 retval = -EINVAL;
2362 }
2363
2364 hsotg->core_params->host_ls_low_power_phy_clk = val;
2365 return retval;
2366}
2367
2368int dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2369{
2370 int retval = 0;
2371
Paul Zimmerman498f0662013-11-22 16:43:47 -08002372 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002373 if (val >= 0) {
2374 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2375 dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
2376 }
2377 val = 0;
2378 dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
2379 retval = -EINVAL;
2380 }
2381
2382 hsotg->core_params->phy_ulpi_ddr = val;
2383 return retval;
2384}
2385
2386int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2387{
2388 int retval = 0;
2389
Paul Zimmerman498f0662013-11-22 16:43:47 -08002390 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002391 if (val >= 0) {
2392 dev_err(hsotg->dev,
2393 "Wrong value for phy_ulpi_ext_vbus\n");
2394 dev_err(hsotg->dev,
2395 "phy_ulpi_ext_vbus must be 0 or 1\n");
2396 }
2397 val = 0;
2398 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2399 retval = -EINVAL;
2400 }
2401
2402 hsotg->core_params->phy_ulpi_ext_vbus = val;
2403 return retval;
2404}
2405
2406int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2407{
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002408 int valid = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002409 int retval = 0;
2410
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002411 switch (hsotg->hw_params.utmi_phy_data_width) {
2412 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2413 valid = (val == 8);
2414 break;
2415 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2416 valid = (val == 16);
2417 break;
2418 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2419 valid = (val == 8 || val == 16);
2420 break;
2421 }
2422
2423 if (!valid) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002424 if (val >= 0) {
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002425 dev_err(hsotg->dev,
2426 "%d invalid for phy_utmi_width. Check HW configuration.\n",
2427 val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002428 }
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002429 val = (hsotg->hw_params.utmi_phy_data_width ==
2430 GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002431 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2432 retval = -EINVAL;
2433 }
2434
2435 hsotg->core_params->phy_utmi_width = val;
2436 return retval;
2437}
2438
2439int dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2440{
2441 int retval = 0;
2442
Paul Zimmerman498f0662013-11-22 16:43:47 -08002443 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002444 if (val >= 0) {
2445 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2446 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2447 }
2448 val = 0;
2449 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2450 retval = -EINVAL;
2451 }
2452
2453 hsotg->core_params->ulpi_fs_ls = val;
2454 return retval;
2455}
2456
2457int dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2458{
2459 int retval = 0;
2460
Paul Zimmerman498f0662013-11-22 16:43:47 -08002461 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002462 if (val >= 0) {
2463 dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2464 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2465 }
2466 val = 0;
2467 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2468 retval = -EINVAL;
2469 }
2470
2471 hsotg->core_params->ts_dline = val;
2472 return retval;
2473}
2474
/*
 * Validates and stores the i2c_enable core parameter (must be 0 or 1).
 * Unless NO_FS_PHY_HW_CHECKS is defined, enabling I2C also requires the
 * core to have been synthesized with I2C support; otherwise the hardware
 * capability is used as fallback and -EINVAL returned.
 */
int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
{
#ifndef NO_FS_PHY_HW_CHECKS
	int valid = 1;
#endif
	int retval = 0;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
		}

#ifndef NO_FS_PHY_HW_CHECKS
		valid = 0;
#else
		/* No HW checks available: just force 0 */
		val = 0;
		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
		retval = -EINVAL;
#endif
	}

#ifndef NO_FS_PHY_HW_CHECKS
	/* Enabling I2C requires hardware support for it */
	if (val == 1 && !(hsotg->hw_params.i2c_enable))
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for i2c_enable. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.i2c_enable;
		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
		retval = -EINVAL;
	}
#endif

	hsotg->core_params->i2c_enable = val;
	return retval;
}
2515
2516int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2517{
2518 int valid = 1;
2519 int retval = 0;
2520
Paul Zimmerman498f0662013-11-22 16:43:47 -08002521 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002522 if (val >= 0) {
2523 dev_err(hsotg->dev,
2524 "Wrong value for en_multiple_tx_fifo,\n");
2525 dev_err(hsotg->dev,
2526 "en_multiple_tx_fifo must be 0 or 1\n");
2527 }
2528 valid = 0;
2529 }
2530
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002531 if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002532 valid = 0;
2533
2534 if (!valid) {
2535 if (val >= 0)
2536 dev_err(hsotg->dev,
2537 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2538 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002539 val = hsotg->hw_params.en_multiple_tx_fifo;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002540 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2541 retval = -EINVAL;
2542 }
2543
2544 hsotg->core_params->en_multiple_tx_fifo = val;
2545 return retval;
2546}
2547
2548int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2549{
2550 int valid = 1;
2551 int retval = 0;
2552
Paul Zimmerman498f0662013-11-22 16:43:47 -08002553 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002554 if (val >= 0) {
2555 dev_err(hsotg->dev,
2556 "'%d' invalid for parameter reload_ctl\n", val);
2557 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2558 }
2559 valid = 0;
2560 }
2561
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002562 if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002563 valid = 0;
2564
2565 if (!valid) {
2566 if (val >= 0)
2567 dev_err(hsotg->dev,
2568 "%d invalid for parameter reload_ctl. Check HW configuration.\n",
2569 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002570 val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002571 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2572 retval = -EINVAL;
2573 }
2574
2575 hsotg->core_params->reload_ctl = val;
2576 return retval;
2577}
2578
Paul Zimmerman4d3190e2013-07-16 12:22:12 -07002579int dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002580{
Paul Zimmerman4d3190e2013-07-16 12:22:12 -07002581 if (val != -1)
2582 hsotg->core_params->ahbcfg = val;
2583 else
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02002584 hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
Luis Ortega Perez de Villar0464a3d2013-09-25 13:10:50 +02002585 GAHBCFG_HBSTLEN_SHIFT;
Paul Zimmerman4d3190e2013-07-16 12:22:12 -07002586 return 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002587}
2588
2589int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2590{
2591 int retval = 0;
2592
Paul Zimmerman498f0662013-11-22 16:43:47 -08002593 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002594 if (val >= 0) {
2595 dev_err(hsotg->dev,
2596 "'%d' invalid for parameter otg_ver\n", val);
2597 dev_err(hsotg->dev,
2598 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2599 }
2600 val = 0;
2601 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2602 retval = -EINVAL;
2603 }
2604
2605 hsotg->core_params->otg_ver = val;
2606 return retval;
2607}
2608
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002609/**
2610 * During device initialization, read various hardware configuration
2611 * registers and interpret the contents.
2612 */
2613int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
2614{
2615 struct dwc2_hw_params *hw = &hsotg->hw_params;
2616 unsigned width;
2617 u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
2618 u32 hptxfsiz, grxfsiz, gnptxfsiz;
2619 u32 gusbcfg;
2620
2621 /*
2622 * Attempt to ensure this device is really a DWC_otg Controller.
2623 * Read and verify the GSNPSID register contents. The value should be
2624 * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3",
2625 * as in "OTG version 2.xx" or "OTG version 3.xx".
2626 */
2627 hw->snpsid = readl(hsotg->regs + GSNPSID);
2628 if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
2629 (hw->snpsid & 0xfffff000) != 0x4f543000) {
2630 dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
2631 hw->snpsid);
2632 return -ENODEV;
2633 }
2634
2635 dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
2636 hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
2637 hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
2638
2639 hwcfg1 = readl(hsotg->regs + GHWCFG1);
2640 hwcfg2 = readl(hsotg->regs + GHWCFG2);
2641 hwcfg3 = readl(hsotg->regs + GHWCFG3);
2642 hwcfg4 = readl(hsotg->regs + GHWCFG4);
2643 gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
2644 grxfsiz = readl(hsotg->regs + GRXFSIZ);
2645
2646 dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
2647 dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
2648 dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
2649 dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
2650 dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
2651 dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
2652
2653 /* Force host mode to get HPTXFSIZ exact power on value */
2654 gusbcfg = readl(hsotg->regs + GUSBCFG);
2655 gusbcfg |= GUSBCFG_FORCEHOSTMODE;
2656 writel(gusbcfg, hsotg->regs + GUSBCFG);
2657 usleep_range(100000, 150000);
2658
2659 hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
2660 dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
2661 gusbcfg = readl(hsotg->regs + GUSBCFG);
2662 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
2663 writel(gusbcfg, hsotg->regs + GUSBCFG);
2664 usleep_range(100000, 150000);
2665
2666 /* hwcfg2 */
2667 hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
2668 GHWCFG2_OP_MODE_SHIFT;
2669 hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
2670 GHWCFG2_ARCHITECTURE_SHIFT;
2671 hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
2672 hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
2673 GHWCFG2_NUM_HOST_CHAN_SHIFT);
2674 hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
2675 GHWCFG2_HS_PHY_TYPE_SHIFT;
2676 hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
2677 GHWCFG2_FS_PHY_TYPE_SHIFT;
2678 hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
2679 GHWCFG2_NUM_DEV_EP_SHIFT;
2680 hw->nperio_tx_q_depth =
2681 (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
2682 GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
2683 hw->host_perio_tx_q_depth =
2684 (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
2685 GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
2686 hw->dev_token_q_depth =
2687 (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
2688 GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
2689
2690 /* hwcfg3 */
2691 width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
2692 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
2693 hw->max_transfer_size = (1 << (width + 11)) - 1;
2694 width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
2695 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
2696 hw->max_packet_count = (1 << (width + 4)) - 1;
2697 hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
2698 hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
2699 GHWCFG3_DFIFO_DEPTH_SHIFT;
2700
2701 /* hwcfg4 */
2702 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
2703 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
2704 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
2705 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
2706 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002707 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
2708 GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002709
2710 /* fifo sizes */
2711 hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
2712 GRXFSIZ_DEPTH_SHIFT;
2713 hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2714 FIFOSIZE_DEPTH_SHIFT;
2715 hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2716 FIFOSIZE_DEPTH_SHIFT;
2717
2718 dev_dbg(hsotg->dev, "Detected values from hardware:\n");
2719 dev_dbg(hsotg->dev, " op_mode=%d\n",
2720 hw->op_mode);
2721 dev_dbg(hsotg->dev, " arch=%d\n",
2722 hw->arch);
2723 dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
2724 hw->dma_desc_enable);
2725 dev_dbg(hsotg->dev, " power_optimized=%d\n",
2726 hw->power_optimized);
2727 dev_dbg(hsotg->dev, " i2c_enable=%d\n",
2728 hw->i2c_enable);
2729 dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
2730 hw->hs_phy_type);
2731 dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
2732 hw->fs_phy_type);
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002733 dev_dbg(hsotg->dev, " utmi_phy_data_wdith=%d\n",
2734 hw->utmi_phy_data_width);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002735 dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
2736 hw->num_dev_ep);
2737 dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
2738 hw->num_dev_perio_in_ep);
2739 dev_dbg(hsotg->dev, " host_channels=%d\n",
2740 hw->host_channels);
2741 dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
2742 hw->max_transfer_size);
2743 dev_dbg(hsotg->dev, " max_packet_count=%d\n",
2744 hw->max_packet_count);
2745 dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
2746 hw->nperio_tx_q_depth);
2747 dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
2748 hw->host_perio_tx_q_depth);
2749 dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
2750 hw->dev_token_q_depth);
2751 dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
2752 hw->enable_dynamic_fifo);
2753 dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
2754 hw->en_multiple_tx_fifo);
2755 dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
2756 hw->total_fifo_size);
2757 dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n",
2758 hw->host_rx_fifo_size);
2759 dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
2760 hw->host_nperio_tx_fifo_size);
2761 dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
2762 hw->host_perio_tx_fifo_size);
2763 dev_dbg(hsotg->dev, "\n");
2764
2765 return 0;
2766}
2767
Dom Cobley20f2eb92013-09-23 14:23:34 -07002768int dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
2769{
2770 int retval = 0;
2771
Paul Zimmerman498f0662013-11-22 16:43:47 -08002772 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Dom Cobley20f2eb92013-09-23 14:23:34 -07002773 if (val >= 0) {
2774 dev_err(hsotg->dev,
2775 "'%d' invalid for parameter uframe_sched\n",
2776 val);
2777 dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
2778 }
2779 val = 1;
2780 dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
2781 retval = -EINVAL;
2782 }
2783
2784 hsotg->core_params->uframe_sched = val;
2785 return retval;
2786}
2787
/*
 * This function is called during module initialization to pass module
 * parameters for the DWC_otg core. It returns non-0 if any parameters are
 * invalid.
 *
 * Each dwc2_set_param_*() helper validates one field of @params against the
 * detected hardware capabilities and stores either the requested or a
 * corrected value in hsotg->core_params; the individual error codes are
 * OR-combined, so the result is 0 only when every parameter was accepted.
 */
int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
			const struct dwc2_core_params *params)
{
	int retval = 0;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	retval |= dwc2_set_param_otg_cap(hsotg, params->otg_cap);
	retval |= dwc2_set_param_dma_enable(hsotg, params->dma_enable);
	retval |= dwc2_set_param_dma_desc_enable(hsotg,
			params->dma_desc_enable);
	retval |= dwc2_set_param_host_support_fs_ls_low_power(hsotg,
			params->host_support_fs_ls_low_power);
	retval |= dwc2_set_param_enable_dynamic_fifo(hsotg,
			params->enable_dynamic_fifo);
	retval |= dwc2_set_param_host_rx_fifo_size(hsotg,
			params->host_rx_fifo_size);
	retval |= dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
			params->host_nperio_tx_fifo_size);
	retval |= dwc2_set_param_host_perio_tx_fifo_size(hsotg,
			params->host_perio_tx_fifo_size);
	retval |= dwc2_set_param_max_transfer_size(hsotg,
			params->max_transfer_size);
	retval |= dwc2_set_param_max_packet_count(hsotg,
			params->max_packet_count);
	retval |= dwc2_set_param_host_channels(hsotg, params->host_channels);
	retval |= dwc2_set_param_phy_type(hsotg, params->phy_type);
	retval |= dwc2_set_param_speed(hsotg, params->speed);
	retval |= dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
			params->host_ls_low_power_phy_clk);
	retval |= dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
	retval |= dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
			params->phy_ulpi_ext_vbus);
	retval |= dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
	retval |= dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
	retval |= dwc2_set_param_ts_dline(hsotg, params->ts_dline);
	retval |= dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
	retval |= dwc2_set_param_en_multiple_tx_fifo(hsotg,
			params->en_multiple_tx_fifo);
	retval |= dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
	retval |= dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
	retval |= dwc2_set_param_otg_ver(hsotg, params->otg_ver);
	retval |= dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);

	return retval;
}
2838
2839u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
2840{
2841 return (u16)(hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103);
2842}
2843
2844int dwc2_check_core_status(struct dwc2_hsotg *hsotg)
2845{
2846 if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
2847 return -1;
2848 else
2849 return 0;
2850}
2851
2852/**
2853 * dwc2_enable_global_interrupts() - Enables the controller's Global
2854 * Interrupt in the AHB Config register
2855 *
2856 * @hsotg: Programming view of DWC_otg controller
2857 */
2858void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
2859{
2860 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2861
2862 ahbcfg |= GAHBCFG_GLBL_INTR_EN;
2863 writel(ahbcfg, hsotg->regs + GAHBCFG);
2864}
2865
2866/**
2867 * dwc2_disable_global_interrupts() - Disables the controller's Global
2868 * Interrupt in the AHB Config register
2869 *
2870 * @hsotg: Programming view of DWC_otg controller
2871 */
2872void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
2873{
2874 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2875
2876 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
2877 writel(ahbcfg, hsotg->regs + GAHBCFG);
2878}
2879
2880MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
2881MODULE_AUTHOR("Synopsys, Inc.");
2882MODULE_LICENSE("Dual BSD/GPL");