1/*
2 * core.c - DesignWare HS OTG Controller common routines
3 *
4 * Copyright (C) 2004-2013 Synopsys, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The names of the above-listed copyright holders may not be used
16 * to endorse or promote products derived from this software without
17 * specific prior written permission.
18 *
19 * ALTERNATIVELY, this software may be distributed under the terms of the
20 * GNU General Public License ("GPL") as published by the Free Software
21 * Foundation; either version 2 of the License, or (at your option) any
22 * later version.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
25 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
28 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
29 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
30 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
31 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36
37/*
38 * The Core code provides basic services for accessing and managing the
39 * DWC_otg hardware. These services are used by both the Host Controller
40 * Driver and the Peripheral Controller Driver.
41 */
42#include <linux/kernel.h>
43#include <linux/module.h>
44#include <linux/moduleparam.h>
45#include <linux/spinlock.h>
46#include <linux/interrupt.h>
47#include <linux/dma-mapping.h>
48#include <linux/delay.h>
49#include <linux/io.h>
50#include <linux/slab.h>
51#include <linux/usb.h>
52
53#include <linux/usb/hcd.h>
54#include <linux/usb/ch11.h>
55
56#include "core.h"
57#include "hcd.h"
58
59/**
60 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
61 * used in both device and host modes
62 *
63 * @hsotg: Programming view of the DWC_otg controller
64 */
65static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
66{
67 u32 intmsk;
68
69 /* Clear any pending OTG Interrupts */
70 writel(0xffffffff, hsotg->regs + GOTGINT);
71
72 /* Clear any pending interrupts */
73 writel(0xffffffff, hsotg->regs + GINTSTS);
74
75 /* Enable the interrupts in the GINTMSK */
76 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
77
78 if (hsotg->core_params->dma_enable <= 0)
79 intmsk |= GINTSTS_RXFLVL;
80
81 intmsk |= GINTSTS_CONIDSTSCHNG | GINTSTS_WKUPINT | GINTSTS_USBSUSP |
82 GINTSTS_SESSREQINT;
83
84 writel(intmsk, hsotg->regs + GINTMSK);
85}
86
87/*
88 * Initializes the FSLSPClkSel field of the HCFG register depending on the
89 * PHY type
90 */
91static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
92{
93 u32 hcfg, val;
94
95 if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
96 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
97 hsotg->core_params->ulpi_fs_ls > 0) ||
98 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
99 /* Full speed PHY */
100 val = HCFG_FSLSPCLKSEL_48_MHZ;
101 } else {
102 /* High speed PHY running at full speed or high speed */
103 val = HCFG_FSLSPCLKSEL_30_60_MHZ;
104 }
105
106 dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
107 hcfg = readl(hsotg->regs + HCFG);
108 hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
109 hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
110 writel(hcfg, hsotg->regs + HCFG);
111}
112
113/*
114 * Do a soft reset of the core. Be careful with this because it
115 * resets all the internal state machines of the core.
116 */
117static void dwc2_core_reset(struct dwc2_hsotg *hsotg)
118{
119 u32 greset;
120 int count = 0;
121
122 dev_vdbg(hsotg->dev, "%s()\n", __func__);
123
124 /* Wait for AHB master IDLE state */
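	/* Poll GRSTCTL up to 50 times, sleeping 20-40 ms per iteration (roughly 1-2 s total) */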
125 do {
126 usleep_range(20000, 40000);
127 greset = readl(hsotg->regs + GRSTCTL);
128 if (++count > 50) {
129 dev_warn(hsotg->dev,
130 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
131 __func__, greset);
132 return;
133 }
134 } while (!(greset & GRSTCTL_AHBIDLE));
135
136 /* Core Soft Reset */
137 count = 0;
138 greset |= GRSTCTL_CSFTRST;
139 writel(greset, hsotg->regs + GRSTCTL);
140 do {
141 usleep_range(20000, 40000);
142 greset = readl(hsotg->regs + GRSTCTL);
143 if (++count > 50) {
144 dev_warn(hsotg->dev,
145 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
146 __func__, greset);
147 break;
148 }
149 } while (greset & GRSTCTL_CSFTRST);
150
151 /*
152 * NOTE: This long sleep is _very_ important, otherwise the core will
153 * not stay in host mode after a connector ID change!
154 */
155 usleep_range(150000, 200000);
156}
157
158static void dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
159{
160 u32 usbcfg, i2cctl;
161
162 /*
163 * core_init() is now called on every switch so only call the
164 * following for the first time through
165 */
166 if (select_phy) {
167 dev_dbg(hsotg->dev, "FS PHY selected\n");
168 usbcfg = readl(hsotg->regs + GUSBCFG);
169 usbcfg |= GUSBCFG_PHYSEL;
170 writel(usbcfg, hsotg->regs + GUSBCFG);
171
172 /* Reset after a PHY select */
173 dwc2_core_reset(hsotg);
174 }
175
176 /*
177 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
178 * do this on HNP Dev/Host mode switches (done in dev_init and
179 * host_init).
180 */
181 if (dwc2_is_host_mode(hsotg))
182 dwc2_init_fs_ls_pclk_sel(hsotg);
183
184 if (hsotg->core_params->i2c_enable > 0) {
185 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
186
187 /* Program GUSBCFG.OtgUtmiFsSel to I2C */
188 usbcfg = readl(hsotg->regs + GUSBCFG);
189 usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
190 writel(usbcfg, hsotg->regs + GUSBCFG);
191
192 /* Program GI2CCTL.I2CEn */
193 i2cctl = readl(hsotg->regs + GI2CCTL);
194 i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
195 i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
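		/* Write GI2CCTL with I2CEn cleared first, then write it again with I2CEn set */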
196 i2cctl &= ~GI2CCTL_I2CEN;
197 writel(i2cctl, hsotg->regs + GI2CCTL);
198 i2cctl |= GI2CCTL_I2CEN;
199 writel(i2cctl, hsotg->regs + GI2CCTL);
200 }
201}
202
203static void dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
204{
205 u32 usbcfg;
206
207 if (!select_phy)
208 return;
209
210 usbcfg = readl(hsotg->regs + GUSBCFG);
211
212 /*
213 * HS PHY parameters. These parameters are preserved during soft reset
214 * so only program the first time. Do a soft reset immediately after
215 * setting phyif.
216 */
217 switch (hsotg->core_params->phy_type) {
218 case DWC2_PHY_TYPE_PARAM_ULPI:
219 /* ULPI interface */
220 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
221 usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
222 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
223 if (hsotg->core_params->phy_ulpi_ddr > 0)
224 usbcfg |= GUSBCFG_DDRSEL;
225 break;
226 case DWC2_PHY_TYPE_PARAM_UTMI:
227 /* UTMI+ interface */
228 dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
229 usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
230 if (hsotg->core_params->phy_utmi_width == 16)
231 usbcfg |= GUSBCFG_PHYIF16;
232 break;
233 default:
234 dev_err(hsotg->dev, "FS PHY selected at HS!\n");
235 break;
236 }
237
238 writel(usbcfg, hsotg->regs + GUSBCFG);
239
240 /* Reset after setting the PHY parameters */
241 dwc2_core_reset(hsotg);
242}
243
244static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
245{
246 u32 usbcfg;
247
248 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
249 hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
250 /* If FS mode with FS PHY */
251 dwc2_fs_phy_init(hsotg, select_phy);
252 } else {
253 /* High speed PHY */
254 dwc2_hs_phy_init(hsotg, select_phy);
255 }
256
257 if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
258 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
259 hsotg->core_params->ulpi_fs_ls > 0) {
260 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
261 usbcfg = readl(hsotg->regs + GUSBCFG);
262 usbcfg |= GUSBCFG_ULPI_FS_LS;
263 usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
264 writel(usbcfg, hsotg->regs + GUSBCFG);
265 } else {
266 usbcfg = readl(hsotg->regs + GUSBCFG);
267 usbcfg &= ~GUSBCFG_ULPI_FS_LS;
268 usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
269 writel(usbcfg, hsotg->regs + GUSBCFG);
270 }
271}
272
273static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
274{
275 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
276
277 switch (hsotg->hw_params.arch) {
278 case GHWCFG2_EXT_DMA_ARCH:
279 dev_err(hsotg->dev, "External DMA Mode not supported\n");
280 return -EINVAL;
281
282 case GHWCFG2_INT_DMA_ARCH:
283 dev_dbg(hsotg->dev, "Internal DMA Mode\n");
284 if (hsotg->core_params->ahbcfg != -1) {
285 ahbcfg &= GAHBCFG_CTRL_MASK;
286 ahbcfg |= hsotg->core_params->ahbcfg &
287 ~GAHBCFG_CTRL_MASK;
288 }
289 break;
290
291 case GHWCFG2_SLAVE_ONLY_ARCH:
292 default:
293 dev_dbg(hsotg->dev, "Slave Only Mode\n");
294 break;
295 }
296
297 dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
298 hsotg->core_params->dma_enable,
299 hsotg->core_params->dma_desc_enable);
300
301 if (hsotg->core_params->dma_enable > 0) {
302 if (hsotg->core_params->dma_desc_enable > 0)
303 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
304 else
305 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
306 } else {
307 dev_dbg(hsotg->dev, "Using Slave mode\n");
308 hsotg->core_params->dma_desc_enable = 0;
309 }
310
311 if (hsotg->core_params->dma_enable > 0)
312 ahbcfg |= GAHBCFG_DMA_EN;
313
314 writel(ahbcfg, hsotg->regs + GAHBCFG);
315
316 return 0;
317}
318
319static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
320{
321 u32 usbcfg;
322
323 usbcfg = readl(hsotg->regs + GUSBCFG);
324 usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
325
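	/*
	 * Set the HNP/SRP capability bits only when both the hardware
	 * operating mode and the otg_cap parameter allow them.
	 */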
326 switch (hsotg->hw_params.op_mode) {
327 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
328 if (hsotg->core_params->otg_cap ==
329 DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
330 usbcfg |= GUSBCFG_HNPCAP;
331 if (hsotg->core_params->otg_cap !=
332 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
333 usbcfg |= GUSBCFG_SRPCAP;
334 break;
335
336 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
337 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
338 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
339 if (hsotg->core_params->otg_cap !=
340 DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
341 usbcfg |= GUSBCFG_SRPCAP;
342 break;
343
344 case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
345 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
346 case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
347 default:
348 break;
349 }
350
351 writel(usbcfg, hsotg->regs + GUSBCFG);
352}
353
354/**
355 * dwc2_core_init() - Initializes the DWC_otg controller registers and
356 * prepares the core for device mode or host mode operation
357 *
358 * @hsotg: Programming view of the DWC_otg controller
359 * @select_phy: If true then also set the Phy type
360 * @irq: If >= 0, the irq to register
361 */
362int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
363{
364 u32 usbcfg, otgctl;
365 int retval;
366
367 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
368
369 usbcfg = readl(hsotg->regs + GUSBCFG);
370
371 /* Set ULPI External VBUS bit if needed */
372 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
373 if (hsotg->core_params->phy_ulpi_ext_vbus ==
374 DWC2_PHY_ULPI_EXTERNAL_VBUS)
375 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
376
377 /* Set external TS Dline pulsing bit if needed */
378 usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
379 if (hsotg->core_params->ts_dline > 0)
380 usbcfg |= GUSBCFG_TERMSELDLPULSE;
381
382 writel(usbcfg, hsotg->regs + GUSBCFG);
383
384 /* Reset the Controller */
385 dwc2_core_reset(hsotg);
386
387 /*
388 * This needs to happen in FS mode before any other programming occurs
389 */
390 dwc2_phy_init(hsotg, select_phy);
391
392 /* Program the GAHBCFG Register */
393 retval = dwc2_gahbcfg_init(hsotg);
394 if (retval)
395 return retval;
396
397 /* Program the GUSBCFG register */
398 dwc2_gusbcfg_init(hsotg);
399
400 /* Program the GOTGCTL register */
401 otgctl = readl(hsotg->regs + GOTGCTL);
402 otgctl &= ~GOTGCTL_OTGVER;
403 if (hsotg->core_params->otg_ver > 0)
404 otgctl |= GOTGCTL_OTGVER;
405 writel(otgctl, hsotg->regs + GOTGCTL);
406 dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
407
408 /* Clear the SRP success bit for FS-I2c */
409 hsotg->srp_success = 0;
410
411 if (irq >= 0) {
412 dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
413 irq);
414 retval = devm_request_irq(hsotg->dev, irq,
415 dwc2_handle_common_intr, IRQF_SHARED,
416 dev_name(hsotg->dev), hsotg);
417 if (retval)
418 return retval;
419 }
420
421 /* Enable common interrupts */
422 dwc2_enable_common_interrupts(hsotg);
423
424 /*
425 * Do device or host initialization based on mode during PCD and
426 * HCD initialization
427 */
428 if (dwc2_is_host_mode(hsotg)) {
429 dev_dbg(hsotg->dev, "Host Mode\n");
430 hsotg->op_state = OTG_STATE_A_HOST;
431 } else {
432 dev_dbg(hsotg->dev, "Device Mode\n");
433 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
434 }
435
436 return 0;
437}
438
439/**
440 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
441 *
442 * @hsotg: Programming view of DWC_otg controller
443 */
444void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
445{
446 u32 intmsk;
447
448 dev_dbg(hsotg->dev, "%s()\n", __func__);
449
450 /* Disable all interrupts */
451 writel(0, hsotg->regs + GINTMSK);
452 writel(0, hsotg->regs + HAINTMSK);
453
454 /* Clear any pending interrupts */
455 writel(0xffffffff, hsotg->regs + GINTSTS);
456
457 /* Enable the common interrupts */
458 dwc2_enable_common_interrupts(hsotg);
459
460 /* Enable host mode interrupts without disturbing common interrupts */
461 intmsk = readl(hsotg->regs + GINTMSK);
462 intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
463 writel(intmsk, hsotg->regs + GINTMSK);
464}
465
466/**
467 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
468 *
469 * @hsotg: Programming view of DWC_otg controller
470 */
471void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
472{
473 u32 intmsk = readl(hsotg->regs + GINTMSK);
474
475 /* Disable host mode interrupts without disturbing common interrupts */
476 intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
477 GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
478 writel(intmsk, hsotg->regs + GINTMSK);
479}
480
481static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
482{
483 struct dwc2_core_params *params = hsotg->core_params;
484 u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
485
486 if (!params->enable_dynamic_fifo)
487 return;
488
489 /* Rx FIFO */
490 grxfsiz = readl(hsotg->regs + GRXFSIZ);
491 dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
492 grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
493 grxfsiz |= params->host_rx_fifo_size <<
494 GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
495 writel(grxfsiz, hsotg->regs + GRXFSIZ);
496 dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));
497
498 /* Non-periodic Tx FIFO */
499 dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
500 readl(hsotg->regs + GNPTXFSIZ));
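	/* The non-periodic Tx FIFO is placed directly after the Rx FIFO in FIFO RAM */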
501 nptxfsiz = params->host_nperio_tx_fifo_size <<
502 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
503 nptxfsiz |= params->host_rx_fifo_size <<
504 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
505 writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
506 dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
507 readl(hsotg->regs + GNPTXFSIZ));
508
509 /* Periodic Tx FIFO */
510 dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
511 readl(hsotg->regs + HPTXFSIZ));
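	/* The periodic Tx FIFO follows the Rx FIFO and the non-periodic Tx FIFO */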
512 hptxfsiz = params->host_perio_tx_fifo_size <<
513 FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
514 hptxfsiz |= (params->host_rx_fifo_size +
515 params->host_nperio_tx_fifo_size) <<
516 FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
517 writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
518 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
519 readl(hsotg->regs + HPTXFSIZ));
520
521 if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
522 hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
523 /*
524 * Global DFIFOCFG calculation for Host mode -
525 * include RxFIFO, NPTXFIFO and HPTXFIFO
526 */
527 dfifocfg = readl(hsotg->regs + GDFIFOCFG);
528 dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
529 dfifocfg |= (params->host_rx_fifo_size +
530 params->host_nperio_tx_fifo_size +
531 params->host_perio_tx_fifo_size) <<
532 GDFIFOCFG_EPINFOBASE_SHIFT &
533 GDFIFOCFG_EPINFOBASE_MASK;
534 writel(dfifocfg, hsotg->regs + GDFIFOCFG);
535 }
536}
537
538/**
539 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
540 * Host mode
541 *
542 * @hsotg: Programming view of DWC_otg controller
543 *
544 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
545 * request queues. Host channels are reset to ensure that they are ready for
546 * performing transfers.
547 */
548void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
549{
550 u32 hcfg, hfir, otgctl;
551
552 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
553
554 /* Restart the Phy Clock */
555 writel(0, hsotg->regs + PCGCTL);
556
557 /* Initialize Host Configuration Register */
558 dwc2_init_fs_ls_pclk_sel(hsotg);
559 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
560 hcfg = readl(hsotg->regs + HCFG);
561 hcfg |= HCFG_FSLSSUPP;
562 writel(hcfg, hsotg->regs + HCFG);
563 }
564
565 /*
566 * This bit allows dynamic reloading of the HFIR register during
567 * runtime. This bit needs to be programmed during initial configuration
568 * and its value must not be changed during runtime.
569 */
570 if (hsotg->core_params->reload_ctl > 0) {
571 hfir = readl(hsotg->regs + HFIR);
572 hfir |= HFIR_RLDCTRL;
573 writel(hfir, hsotg->regs + HFIR);
574 }
575
576 if (hsotg->core_params->dma_desc_enable > 0) {
577 u32 op_mode = hsotg->hw_params.op_mode;
578 if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
579 !hsotg->hw_params.dma_desc_enable ||
580 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
581 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
582 op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
583 dev_err(hsotg->dev,
584 "Hardware does not support descriptor DMA mode -\n");
585 dev_err(hsotg->dev,
586 "falling back to buffer DMA mode.\n");
587 hsotg->core_params->dma_desc_enable = 0;
588 } else {
589 hcfg = readl(hsotg->regs + HCFG);
590 hcfg |= HCFG_DESCDMA;
591 writel(hcfg, hsotg->regs + HCFG);
592 }
593 }
594
595 /* Configure data FIFO sizes */
596 dwc2_config_fifos(hsotg);
597
598 /* TODO - check this */
599 /* Clear Host Set HNP Enable in the OTG Control Register */
600 otgctl = readl(hsotg->regs + GOTGCTL);
601 otgctl &= ~GOTGCTL_HSTSETHNPEN;
602 writel(otgctl, hsotg->regs + GOTGCTL);
603
604 /* Make sure the FIFOs are flushed */
605 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
606 dwc2_flush_rx_fifo(hsotg);
607
608 /* Clear Host Set HNP Enable in the OTG Control Register */
609 otgctl = readl(hsotg->regs + GOTGCTL);
610 otgctl &= ~GOTGCTL_HSTSETHNPEN;
611 writel(otgctl, hsotg->regs + GOTGCTL);
612
613 if (hsotg->core_params->dma_desc_enable <= 0) {
614 int num_channels, i;
615 u32 hcchar;
616
617 /* Flush out any leftover queued requests */
618 num_channels = hsotg->core_params->host_channels;
619 for (i = 0; i < num_channels; i++) {
620 hcchar = readl(hsotg->regs + HCCHAR(i));
621 hcchar &= ~HCCHAR_CHENA;
622 hcchar |= HCCHAR_CHDIS;
623 hcchar &= ~HCCHAR_EPDIR;
624 writel(hcchar, hsotg->regs + HCCHAR(i));
625 }
626
627 /* Halt all channels to put them into a known state */
628 for (i = 0; i < num_channels; i++) {
629 int count = 0;
630
631 hcchar = readl(hsotg->regs + HCCHAR(i));
632 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
633 hcchar &= ~HCCHAR_EPDIR;
634 writel(hcchar, hsotg->regs + HCCHAR(i));
635 dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
636 __func__, i);
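			/* Give the channel up to ~1 ms (1000 polls of 1 us) to clear its enable bit */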
637 do {
638 hcchar = readl(hsotg->regs + HCCHAR(i));
639 if (++count > 1000) {
640 dev_err(hsotg->dev,
641 "Unable to clear enable on channel %d\n",
642 i);
643 break;
644 }
645 udelay(1);
646 } while (hcchar & HCCHAR_CHENA);
647 }
648 }
649
650 /* Turn on the vbus power */
651 dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
652 if (hsotg->op_state == OTG_STATE_A_HOST) {
653 u32 hprt0 = dwc2_read_hprt0(hsotg);
654
655 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
656 !!(hprt0 & HPRT0_PWR));
657 if (!(hprt0 & HPRT0_PWR)) {
658 hprt0 |= HPRT0_PWR;
659 writel(hprt0, hsotg->regs + HPRT0);
660 }
661 }
662
663 dwc2_enable_host_interrupts(hsotg);
664}
665
666static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
667 struct dwc2_host_chan *chan)
668{
669 u32 hcintmsk = HCINTMSK_CHHLTD;
670
671 switch (chan->ep_type) {
672 case USB_ENDPOINT_XFER_CONTROL:
673 case USB_ENDPOINT_XFER_BULK:
674 dev_vdbg(hsotg->dev, "control/bulk\n");
675 hcintmsk |= HCINTMSK_XFERCOMPL;
676 hcintmsk |= HCINTMSK_STALL;
677 hcintmsk |= HCINTMSK_XACTERR;
678 hcintmsk |= HCINTMSK_DATATGLERR;
679 if (chan->ep_is_in) {
680 hcintmsk |= HCINTMSK_BBLERR;
681 } else {
682 hcintmsk |= HCINTMSK_NAK;
683 hcintmsk |= HCINTMSK_NYET;
684 if (chan->do_ping)
685 hcintmsk |= HCINTMSK_ACK;
686 }
687
688 if (chan->do_split) {
689 hcintmsk |= HCINTMSK_NAK;
690 if (chan->complete_split)
691 hcintmsk |= HCINTMSK_NYET;
692 else
693 hcintmsk |= HCINTMSK_ACK;
694 }
695
696 if (chan->error_state)
697 hcintmsk |= HCINTMSK_ACK;
698 break;
699
700 case USB_ENDPOINT_XFER_INT:
701 if (dbg_perio())
702 dev_vdbg(hsotg->dev, "intr\n");
703 hcintmsk |= HCINTMSK_XFERCOMPL;
704 hcintmsk |= HCINTMSK_NAK;
705 hcintmsk |= HCINTMSK_STALL;
706 hcintmsk |= HCINTMSK_XACTERR;
707 hcintmsk |= HCINTMSK_DATATGLERR;
708 hcintmsk |= HCINTMSK_FRMOVRUN;
709
710 if (chan->ep_is_in)
711 hcintmsk |= HCINTMSK_BBLERR;
712 if (chan->error_state)
713 hcintmsk |= HCINTMSK_ACK;
714 if (chan->do_split) {
715 if (chan->complete_split)
716 hcintmsk |= HCINTMSK_NYET;
717 else
718 hcintmsk |= HCINTMSK_ACK;
719 }
720 break;
721
722 case USB_ENDPOINT_XFER_ISOC:
723 if (dbg_perio())
724 dev_vdbg(hsotg->dev, "isoc\n");
725 hcintmsk |= HCINTMSK_XFERCOMPL;
726 hcintmsk |= HCINTMSK_FRMOVRUN;
727 hcintmsk |= HCINTMSK_ACK;
728
729 if (chan->ep_is_in) {
730 hcintmsk |= HCINTMSK_XACTERR;
731 hcintmsk |= HCINTMSK_BBLERR;
732 }
733 break;
734 default:
735 dev_err(hsotg->dev, "## Unknown EP type ##\n");
736 break;
737 }
738
739 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
740 if (dbg_hc(chan))
741 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
742}
743
744static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
745 struct dwc2_host_chan *chan)
746{
747 u32 hcintmsk = HCINTMSK_CHHLTD;
748
749 /*
750 * For Descriptor DMA mode core halts the channel on AHB error.
751 * Interrupt is not required.
752 */
753 if (hsotg->core_params->dma_desc_enable <= 0) {
754 if (dbg_hc(chan))
755 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
756 hcintmsk |= HCINTMSK_AHBERR;
757 } else {
758 if (dbg_hc(chan))
759 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
760 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
761 hcintmsk |= HCINTMSK_XFERCOMPL;
762 }
763
764 if (chan->error_state && !chan->do_split &&
765 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
766 if (dbg_hc(chan))
767 dev_vdbg(hsotg->dev, "setting ACK\n");
768 hcintmsk |= HCINTMSK_ACK;
769 if (chan->ep_is_in) {
770 hcintmsk |= HCINTMSK_DATATGLERR;
771 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
772 hcintmsk |= HCINTMSK_NAK;
773 }
774 }
775
776 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
777 if (dbg_hc(chan))
778 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
779}
780
781static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
782 struct dwc2_host_chan *chan)
783{
784 u32 intmsk;
785
786 if (hsotg->core_params->dma_enable > 0) {
787 if (dbg_hc(chan))
788 dev_vdbg(hsotg->dev, "DMA enabled\n");
789 dwc2_hc_enable_dma_ints(hsotg, chan);
790 } else {
791 if (dbg_hc(chan))
792 dev_vdbg(hsotg->dev, "DMA disabled\n");
793 dwc2_hc_enable_slave_ints(hsotg, chan);
794 }
795
796 /* Enable the top level host channel interrupt */
797 intmsk = readl(hsotg->regs + HAINTMSK);
798 intmsk |= 1 << chan->hc_num;
799 writel(intmsk, hsotg->regs + HAINTMSK);
800 if (dbg_hc(chan))
801 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
802
803 /* Make sure host channel interrupts are enabled */
804 intmsk = readl(hsotg->regs + GINTMSK);
805 intmsk |= GINTSTS_HCHINT;
806 writel(intmsk, hsotg->regs + GINTMSK);
807 if (dbg_hc(chan))
808 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
809}
810
811/**
812 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
813 * a specific endpoint
814 *
815 * @hsotg: Programming view of DWC_otg controller
816 * @chan: Information needed to initialize the host channel
817 *
818 * The HCCHARn register is set up with the characteristics specified in chan.
819 * Host channel interrupts that may need to be serviced while this transfer is
820 * in progress are enabled.
821 */
822void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
823{
824 u8 hc_num = chan->hc_num;
825 u32 hcintmsk;
826 u32 hcchar;
827 u32 hcsplt = 0;
828
829 if (dbg_hc(chan))
830 dev_vdbg(hsotg->dev, "%s()\n", __func__);
831
832 /* Clear old interrupt conditions for this host channel */
833 hcintmsk = 0xffffffff;
834 hcintmsk &= ~HCINTMSK_RESERVED14_31;
835 writel(hcintmsk, hsotg->regs + HCINT(hc_num));
836
837 /* Enable channel interrupts required for this transfer */
838 dwc2_hc_enable_ints(hsotg, chan);
839
840 /*
841 * Program the HCCHARn register with the endpoint characteristics for
842 * the current transfer
843 */
844 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
845 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
846 if (chan->ep_is_in)
847 hcchar |= HCCHAR_EPDIR;
848 if (chan->speed == USB_SPEED_LOW)
849 hcchar |= HCCHAR_LSPDDEV;
850 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
851 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
852 writel(hcchar, hsotg->regs + HCCHAR(hc_num));
853 if (dbg_hc(chan)) {
854 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
855 hc_num, hcchar);
856
857 dev_vdbg(hsotg->dev, "%s: Channel %d\n",
858 __func__, hc_num);
859 dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
860 chan->dev_addr);
861 dev_vdbg(hsotg->dev, " Ep Num: %d\n",
862 chan->ep_num);
863 dev_vdbg(hsotg->dev, " Is In: %d\n",
864 chan->ep_is_in);
865 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
866 chan->speed == USB_SPEED_LOW);
867 dev_vdbg(hsotg->dev, " Ep Type: %d\n",
868 chan->ep_type);
869 dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
870 chan->max_packet);
871 }
872
873 /* Program the HCSPLT register for SPLITs */
874 if (chan->do_split) {
875 if (dbg_hc(chan))
876 dev_vdbg(hsotg->dev,
877 "Programming HC %d with split --> %s\n",
878 hc_num,
879 chan->complete_split ? "CSPLIT" : "SSPLIT");
880 if (chan->complete_split)
881 hcsplt |= HCSPLT_COMPSPLT;
882 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
883 HCSPLT_XACTPOS_MASK;
884 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
885 HCSPLT_HUBADDR_MASK;
886 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
887 HCSPLT_PRTADDR_MASK;
888 if (dbg_hc(chan)) {
889 dev_vdbg(hsotg->dev, " comp split %d\n",
890 chan->complete_split);
891 dev_vdbg(hsotg->dev, " xact pos %d\n",
892 chan->xact_pos);
893 dev_vdbg(hsotg->dev, " hub addr %d\n",
894 chan->hub_addr);
895 dev_vdbg(hsotg->dev, " hub port %d\n",
896 chan->hub_port);
897 dev_vdbg(hsotg->dev, " is_in %d\n",
898 chan->ep_is_in);
899 dev_vdbg(hsotg->dev, " Max Pkt %d\n",
900 chan->max_packet);
901 dev_vdbg(hsotg->dev, " xferlen %d\n",
902 chan->xfer_len);
903 }
904 }
905
906 writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
907}
908
909/**
910 * dwc2_hc_halt() - Attempts to halt a host channel
911 *
912 * @hsotg: Controller register interface
913 * @chan: Host channel to halt
914 * @halt_status: Reason for halting the channel
915 *
916 * This function should only be called in Slave mode or to abort a transfer in
917 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
918 * controller halts the channel when the transfer is complete or a condition
919 * occurs that requires application intervention.
920 *
921 * In slave mode, checks for a free request queue entry, then sets the Channel
922 * Enable and Channel Disable bits of the Host Channel Characteristics
923 * register of the specified channel to initiate the halt. If there is no free
924 * request queue entry, sets only the Channel Disable bit of the HCCHARn
925 * register to flush requests for this channel. In the latter case, sets a
926 * flag to indicate that the host channel needs to be halted when a request
927 * queue slot is open.
928 *
929 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
930 * HCCHARn register. The controller ensures there is space in the request
931 * queue before submitting the halt request.
932 *
933 * Some time may elapse before the core flushes any posted requests for this
934 * host channel and halts. The Channel Halted interrupt handler completes the
935 * deactivation of the host channel.
936 */
937void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
938 enum dwc2_halt_status halt_status)
939{
940 u32 nptxsts, hptxsts, hcchar;
941
942 if (dbg_hc(chan))
943 dev_vdbg(hsotg->dev, "%s()\n", __func__);
944 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
945 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
946
947 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
948 halt_status == DWC2_HC_XFER_AHB_ERR) {
949 /*
950 * Disable all channel interrupts except Ch Halted. The QTD
951 * and QH state associated with this transfer has been cleared
952 * (in the case of URB_DEQUEUE), so the channel needs to be
953 * shut down carefully to prevent crashes.
954 */
955 u32 hcintmsk = HCINTMSK_CHHLTD;
956
957 dev_vdbg(hsotg->dev, "dequeue/error\n");
958 writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
959
960 /*
961 * Make sure no other interrupts besides halt are currently
962 * pending. Handling another interrupt could cause a crash due
963 * to the QTD and QH state.
964 */
965 writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
966
967 /*
968 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
969 * even if the channel was already halted for some other
970 * reason
971 */
972 chan->halt_status = halt_status;
973
974 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
975 if (!(hcchar & HCCHAR_CHENA)) {
976 /*
977 * The channel is either already halted or it hasn't
978 * started yet. In DMA mode, the transfer may halt if
979 * it finishes normally or a condition occurs that
980 * requires driver intervention. Don't want to halt
981 * the channel again. In either Slave or DMA mode,
982 * it's possible that the transfer has been assigned
983 * to a channel, but not started yet when an URB is
984 * dequeued. Don't want to halt a channel that hasn't
985 * started yet.
986 */
987 return;
988 }
989 }
990 if (chan->halt_pending) {
991 /*
992 * A halt has already been issued for this channel. This might
993 * happen when a transfer is aborted by a higher level in
994 * the stack.
995 */
996 dev_vdbg(hsotg->dev,
997 "*** %s: Channel %d, chan->halt_pending already set ***\n",
998 __func__, chan->hc_num);
999 return;
1000 }
1001
1002 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1003
1004 /* No need to set the bit in DDMA for disabling the channel */
1005 /* TODO check it everywhere channel is disabled */
1006 if (hsotg->core_params->dma_desc_enable <= 0) {
1007 if (dbg_hc(chan))
1008 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
1009 hcchar |= HCCHAR_CHENA;
1010 } else {
1011 if (dbg_hc(chan))
1012 dev_dbg(hsotg->dev, "desc DMA enabled\n");
1013 }
1014 hcchar |= HCCHAR_CHDIS;
1015
1016 if (hsotg->core_params->dma_enable <= 0) {
1017 if (dbg_hc(chan))
1018 dev_vdbg(hsotg->dev, "DMA not enabled\n");
1019 hcchar |= HCCHAR_CHENA;
1020
1021 /* Check for space in the request queue to issue the halt */
1022 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1023 chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1024 dev_vdbg(hsotg->dev, "control/bulk\n");
1025 nptxsts = readl(hsotg->regs + GNPTXSTS);
1026 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1027 dev_vdbg(hsotg->dev, "Disabling channel\n");
1028 hcchar &= ~HCCHAR_CHENA;
1029 }
1030 } else {
1031 if (dbg_perio())
1032 dev_vdbg(hsotg->dev, "isoc/intr\n");
1033 hptxsts = readl(hsotg->regs + HPTXSTS);
1034 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1035 hsotg->queuing_high_bandwidth) {
1036 if (dbg_perio())
1037 dev_vdbg(hsotg->dev, "Disabling channel\n");
1038 hcchar &= ~HCCHAR_CHENA;
1039 }
1040 }
1041 } else {
1042 if (dbg_hc(chan))
1043 dev_vdbg(hsotg->dev, "DMA enabled\n");
1044 }
1045
1046 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1047 chan->halt_status = halt_status;
1048
1049 if (hcchar & HCCHAR_CHENA) {
1050 if (dbg_hc(chan))
1051 dev_vdbg(hsotg->dev, "Channel enabled\n");
1052 chan->halt_pending = 1;
1053 chan->halt_on_queue = 0;
1054 } else {
1055 if (dbg_hc(chan))
1056 dev_vdbg(hsotg->dev, "Channel disabled\n");
1057 chan->halt_on_queue = 1;
1058 }
1059
1060 if (dbg_hc(chan)) {
1061 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1062 chan->hc_num);
1063 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
1064 hcchar);
1065 dev_vdbg(hsotg->dev, " halt_pending: %d\n",
1066 chan->halt_pending);
1067 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
1068 chan->halt_on_queue);
1069 dev_vdbg(hsotg->dev, " halt_status: %d\n",
1070 chan->halt_status);
1071 }
1072}
1073
1074/**
1075 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1076 *
1077 * @hsotg: Programming view of DWC_otg controller
1078 * @chan: Identifies the host channel to clean up
1079 *
1080 * This function is normally called after a transfer is done and the host
1081 * channel is being released
1082 */
1083void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1084{
1085 u32 hcintmsk;
1086
1087 chan->xfer_started = 0;
1088
1089 /*
1090 * Clear channel interrupt enables and any unhandled channel interrupt
1091 * conditions
1092 */
1093 writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
1094 hcintmsk = 0xffffffff;
1095 hcintmsk &= ~HCINTMSK_RESERVED14_31;
1096 writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
1097}
1098
1099/**
1100 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1101 * which frame a periodic transfer should occur
1102 *
1103 * @hsotg: Programming view of DWC_otg controller
1104 * @chan: Identifies the host channel to set up and its properties
1105 * @hcchar: Current value of the HCCHAR register for the specified host channel
1106 *
1107 * This function has no effect on non-periodic transfers
1108 */
1109static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1110 struct dwc2_host_chan *chan, u32 *hcchar)
1111{
1112 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1113 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1114 /* Set OddFrm when the current frame number is even, so the transfer goes out in the next (odd) frame */
1115 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
1116 *hcchar |= HCCHAR_ODDFRM;
1117 }
1118}
1119
1120static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1121{
1122 /* Set up the initial PID for the transfer */
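	/*
	 * For high-speed, high-bandwidth isochronous endpoints the starting
	 * PID encodes the number of packets per (micro)frame: IN uses
	 * DATA0/DATA1/DATA2 for one/two/three packets, OUT uses MDATA when
	 * more than one packet is sent.
	 */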
1123 if (chan->speed == USB_SPEED_HIGH) {
1124 if (chan->ep_is_in) {
1125 if (chan->multi_count == 1)
1126 chan->data_pid_start = DWC2_HC_PID_DATA0;
1127 else if (chan->multi_count == 2)
1128 chan->data_pid_start = DWC2_HC_PID_DATA1;
1129 else
1130 chan->data_pid_start = DWC2_HC_PID_DATA2;
1131 } else {
1132 if (chan->multi_count == 1)
1133 chan->data_pid_start = DWC2_HC_PID_DATA0;
1134 else
1135 chan->data_pid_start = DWC2_HC_PID_MDATA;
1136 }
1137 } else {
1138 chan->data_pid_start = DWC2_HC_PID_DATA0;
1139 }
1140}
1141
1142/**
1143 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1144 * the Host Channel
1145 *
1146 * @hsotg: Programming view of DWC_otg controller
1147 * @chan: Information needed to initialize the host channel
1148 *
1149 * This function should only be called in Slave mode. For a channel associated
1150 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1151 * associated with a periodic EP, the periodic Tx FIFO is written.
1152 *
1153 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1154 * the number of bytes written to the Tx FIFO.
1155 */
1156static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1157 struct dwc2_host_chan *chan)
1158{
1159 u32 i;
1160 u32 remaining_count;
1161 u32 byte_count;
1162 u32 dword_count;
1163 u32 __iomem *data_fifo;
1164 u32 *data_buf = (u32 *)chan->xfer_buf;
1165
1166 if (dbg_hc(chan))
1167 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1168
1169 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1170
1171 remaining_count = chan->xfer_len - chan->xfer_count;
1172 if (remaining_count > chan->max_packet)
1173 byte_count = chan->max_packet;
1174 else
1175 byte_count = remaining_count;
1176
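	/* The FIFO is written one 32-bit word at a time, so round the byte count up to whole dwords */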
1177 dword_count = (byte_count + 3) / 4;
1178
1179 if (((unsigned long)data_buf & 0x3) == 0) {
1180 /* xfer_buf is DWORD aligned */
1181 for (i = 0; i < dword_count; i++, data_buf++)
1182 writel(*data_buf, data_fifo);
1183 } else {
1184 /* xfer_buf is not DWORD aligned */
1185 for (i = 0; i < dword_count; i++, data_buf++) {
1186 u32 data = data_buf[0] | data_buf[1] << 8 |
1187 data_buf[2] << 16 | data_buf[3] << 24;
1188 writel(data, data_fifo);
1189 }
1190 }
1191
1192 chan->xfer_count += byte_count;
1193 chan->xfer_buf += byte_count;
1194}
1195
1196/**
1197 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1198 * channel and starts the transfer
1199 *
1200 * @hsotg: Programming view of DWC_otg controller
1201 * @chan: Information needed to initialize the host channel. The xfer_len value
1202 * may be reduced to accommodate the max widths of the XferSize and
1203 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1204 * changed to reflect the final xfer_len value.
1205 *
1206 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1207 * the caller must ensure that there is sufficient space in the request queue
1208 * and Tx Data FIFO.
1209 *
1210 * For an OUT transfer in Slave mode, it loads a data packet into the
1211 * appropriate FIFO. If necessary, additional data packets are loaded in the
1212 * Host ISR.
1213 *
1214 * For an IN transfer in Slave mode, a data packet is requested. The data
1215 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1216 * additional data packets are requested in the Host ISR.
1217 *
1218 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1219 * register along with a packet count of 1 and the channel is enabled. This
1220 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1221 * simply set to 0 since no data transfer occurs in this case.
1222 *
1223 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1224 * all the information required to perform the subsequent data transfer. In
1225 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1226 * controller performs the entire PING protocol, then starts the data
1227 * transfer.
1228 */
1229void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1230 struct dwc2_host_chan *chan)
1231{
1232 u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1233 u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1234 u32 hcchar;
1235 u32 hctsiz = 0;
1236 u16 num_packets;
1237
1238 if (dbg_hc(chan))
1239 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1240
1241 if (chan->do_ping) {
1242 if (hsotg->core_params->dma_enable <= 0) {
1243 if (dbg_hc(chan))
1244 dev_vdbg(hsotg->dev, "ping, no DMA\n");
1245 dwc2_hc_do_ping(hsotg, chan);
1246 chan->xfer_started = 1;
1247 return;
1248 } else {
1249 if (dbg_hc(chan))
1250 dev_vdbg(hsotg->dev, "ping, DMA\n");
1251 hctsiz |= TSIZ_DOPNG;
1252 }
1253 }
1254
1255 if (chan->do_split) {
1256 if (dbg_hc(chan))
1257 dev_vdbg(hsotg->dev, "split\n");
1258 num_packets = 1;
1259
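		/*
		 * Limit how much data goes out with a single start-split; 188
		 * bytes is about one (micro)frame's worth of full-/low-speed
		 * OUT data.
		 */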
1260 if (chan->complete_split && !chan->ep_is_in)
1261 /*
1262 * For CSPLIT OUT Transfer, set the size to 0 so the
1263 * core doesn't expect any data written to the FIFO
1264 */
1265 chan->xfer_len = 0;
1266 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1267 chan->xfer_len = chan->max_packet;
1268 else if (!chan->ep_is_in && chan->xfer_len > 188)
1269 chan->xfer_len = 188;
1270
1271 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1272 TSIZ_XFERSIZE_MASK;
1273 } else {
1274 if (dbg_hc(chan))
1275 dev_vdbg(hsotg->dev, "no split\n");
1276 /*
1277 * Ensure that the transfer length and packet count will fit
1278 * in the widths allocated for them in the HCTSIZn register
1279 */
1280 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1281 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1282 /*
1283 * Make sure the transfer size is no larger than one
1284 * (micro)frame's worth of data. (A check was done
1285 * when the periodic transfer was accepted to ensure
1286 * that a (micro)frame's worth of data can be
1287 * programmed into a channel.)
1288 */
1289 u32 max_periodic_len =
1290 chan->multi_count * chan->max_packet;
1291
1292 if (chan->xfer_len > max_periodic_len)
1293 chan->xfer_len = max_periodic_len;
1294 } else if (chan->xfer_len > max_hc_xfer_size) {
1295 /*
1296 * Make sure that xfer_len is a multiple of max packet
1297 * size
1298 */
1299 chan->xfer_len =
1300 max_hc_xfer_size - chan->max_packet + 1;
1301 }
1302
1303 if (chan->xfer_len > 0) {
1304 num_packets = (chan->xfer_len + chan->max_packet - 1) /
1305 chan->max_packet;
1306 if (num_packets > max_hc_pkt_count) {
1307 num_packets = max_hc_pkt_count;
1308 chan->xfer_len = num_packets * chan->max_packet;
1309 }
1310 } else {
1311 /* Need 1 packet for transfer length of 0 */
1312 num_packets = 1;
1313 }
1314
1315 if (chan->ep_is_in)
1316 /*
1317 * Always program an integral # of max packets for IN
1318 * transfers
1319 */
1320 chan->xfer_len = num_packets * chan->max_packet;
1321
1322 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1323 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1324 /*
1325 * Make sure that the multi_count field matches the
1326 * actual transfer length
1327 */
1328 chan->multi_count = num_packets;
1329
1330 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1331 dwc2_set_pid_isoc(chan);
1332
1333 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1334 TSIZ_XFERSIZE_MASK;
1335 }
1336
1337 chan->start_pkt_count = num_packets;
1338 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1339 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1340 TSIZ_SC_MC_PID_MASK;
1341 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1342 if (dbg_hc(chan)) {
1343 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1344 hctsiz, chan->hc_num);
1345
1346 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1347 chan->hc_num);
1348 dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
1349 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1350 TSIZ_XFERSIZE_SHIFT);
1351 dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
1352 (hctsiz & TSIZ_PKTCNT_MASK) >>
1353 TSIZ_PKTCNT_SHIFT);
1354 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1355 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1356 TSIZ_SC_MC_PID_SHIFT);
1357 }
1358
1359 if (hsotg->core_params->dma_enable > 0) {
1360 dma_addr_t dma_addr;
1361
1362 if (chan->align_buf) {
1363 if (dbg_hc(chan))
1364 dev_vdbg(hsotg->dev, "align_buf\n");
1365 dma_addr = chan->align_buf;
1366 } else {
1367 dma_addr = chan->xfer_dma;
1368 }
1369 writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
1370 if (dbg_hc(chan))
1371 dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1372 (unsigned long)dma_addr, chan->hc_num);
1373 }
1374
1375 /* Start the split */
1376 if (chan->do_split) {
1377 u32 hcsplt = readl(hsotg->regs + HCSPLT(chan->hc_num));
1378
1379 hcsplt |= HCSPLT_SPLTENA;
1380 writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
1381 }
1382
1383 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1384 hcchar &= ~HCCHAR_MULTICNT_MASK;
1385 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1386 HCCHAR_MULTICNT_MASK;
1387 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1388
1389 if (hcchar & HCCHAR_CHDIS)
1390 dev_warn(hsotg->dev,
1391 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1392 __func__, chan->hc_num, hcchar);
1393
1394 /* Set host channel enable after all other setup is complete */
1395 hcchar |= HCCHAR_CHENA;
1396 hcchar &= ~HCCHAR_CHDIS;
1397
1398 if (dbg_hc(chan))
1399 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1400 (hcchar & HCCHAR_MULTICNT_MASK) >>
1401 HCCHAR_MULTICNT_SHIFT);
1402
1403 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1404 if (dbg_hc(chan))
1405 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1406 chan->hc_num);
1407
1408 chan->xfer_started = 1;
1409 chan->requests++;
1410
1411 if (hsotg->core_params->dma_enable <= 0 &&
1412 !chan->ep_is_in && chan->xfer_len > 0)
1413 /* Load OUT packet into the appropriate Tx FIFO */
1414 dwc2_hc_write_packet(hsotg, chan);
1415}
1416
1417/**
1418 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1419 * host channel and starts the transfer in Descriptor DMA mode
1420 *
1421 * @hsotg: Programming view of DWC_otg controller
1422 * @chan: Information needed to initialize the host channel
1423 *
1424 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1425 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1426 * with micro-frame bitmap.
1427 *
1428 * Initializes HCDMA register with descriptor list address and CTD value then
1429 * starts the transfer via enabling the channel.
1430 */
1431void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1432 struct dwc2_host_chan *chan)
1433{
1434 u32 hcchar;
1435 u32 hc_dma;
1436 u32 hctsiz = 0;
1437
1438 if (chan->do_ping)
1439 hctsiz |= TSIZ_DOPNG;
1440
1441 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1442 dwc2_set_pid_isoc(chan);
1443
1444 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1445 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1446 TSIZ_SC_MC_PID_MASK;
1447
1448 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1449 hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1450
1451 /* Non-zero only for high-speed interrupt endpoints */
1452 hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1453
1454 if (dbg_hc(chan)) {
1455 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1456 chan->hc_num);
1457 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1458 chan->data_pid_start);
1459 dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
1460 }
1461
1462 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1463
1464 hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
1465
1466 /* Always start from first descriptor */
1467 hc_dma &= ~HCDMA_CTD_MASK;
1468 writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
1469 if (dbg_hc(chan))
1470 dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
1471 hc_dma, chan->hc_num);
1472
1473 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1474 hcchar &= ~HCCHAR_MULTICNT_MASK;
1475 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1476 HCCHAR_MULTICNT_MASK;
1477
1478 if (hcchar & HCCHAR_CHDIS)
1479 dev_warn(hsotg->dev,
1480 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1481 __func__, chan->hc_num, hcchar);
1482
1483 /* Set host channel enable after all other setup is complete */
1484 hcchar |= HCCHAR_CHENA;
1485 hcchar &= ~HCCHAR_CHDIS;
1486
1487 if (dbg_hc(chan))
1488 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
1489 (hcchar & HCCHAR_MULTICNT_MASK) >>
1490 HCCHAR_MULTICNT_SHIFT);
1491
1492 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1493 if (dbg_hc(chan))
1494 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1495 chan->hc_num);
1496
1497 chan->xfer_started = 1;
1498 chan->requests++;
1499}
1500
1501/**
1502 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1503 * a previous call to dwc2_hc_start_transfer()
1504 *
1505 * @hsotg: Programming view of DWC_otg controller
1506 * @chan: Information needed to initialize the host channel
1507 *
1508 * The caller must ensure there is sufficient space in the request queue and Tx
1509 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1510 * the controller acts autonomously to complete transfers programmed to a host
1511 * channel.
1512 *
1513 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1514 * if there is any data remaining to be queued. For an IN transfer, another
1515 * data packet is always requested. For the SETUP phase of a control transfer,
1516 * this function does nothing.
1517 *
1518 * Return: 1 if a new request is queued, 0 if no more requests are required
1519 * for this transfer
1520 */
1521int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1522 struct dwc2_host_chan *chan)
1523{
1524 if (dbg_hc(chan))
1525 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1526 chan->hc_num);
1527
1528 if (chan->do_split)
1529 /* SPLITs always queue just once per channel */
1530 return 0;
1531
1532 if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1533 /* SETUPs are queued only once since they can't be NAK'd */
1534 return 0;
1535
1536 if (chan->ep_is_in) {
1537 /*
1538 * Always queue another request for other IN transfers. If
1539 * back-to-back INs are issued and NAKs are received for both,
1540 * the driver may still be processing the first NAK when the
1541 * second NAK is received. When the interrupt handler clears
1542 * the NAK interrupt for the first NAK, the second NAK will
1543 * not be seen. So we can't depend on the NAK interrupt
1544 * handler to requeue a NAK'd request. Instead, IN requests
1545 * are issued each time this function is called. When the
1546 * transfer completes, the extra requests for the channel will
1547 * be flushed.
1548 */
1549 u32 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1550
1551 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1552 hcchar |= HCCHAR_CHENA;
1553 hcchar &= ~HCCHAR_CHDIS;
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001554 if (dbg_hc(chan))
1555 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
1556 hcchar);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001557 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1558 chan->requests++;
1559 return 1;
1560 }
1561
1562 /* OUT transfers */
1563
1564 if (chan->xfer_count < chan->xfer_len) {
1565 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1566 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1567 u32 hcchar = readl(hsotg->regs +
1568 HCCHAR(chan->hc_num));
1569
1570 dwc2_hc_set_even_odd_frame(hsotg, chan,
1571 &hcchar);
1572 }
1573
1574 /* Load OUT packet into the appropriate Tx FIFO */
1575 dwc2_hc_write_packet(hsotg, chan);
1576 chan->requests++;
1577 return 1;
1578 }
1579
1580 return 0;
1581}
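
/*
 * Usage sketch (illustrative only, not a call path in this file): after a
 * successful request in Slave mode, a host channel interrupt handler can keep
 * a transfer moving by asking for another request until this function reports
 * that no more are needed:
 *
 *	if (!dwc2_hc_continue_transfer(hsotg, chan))
 *		dev_vdbg(hsotg->dev, "channel %d needs no more requests\n",
 *			 chan->hc_num);
 */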
1582
1583/**
1584 * dwc2_hc_do_ping() - Starts a PING transfer
1585 *
1586 * @hsotg: Programming view of DWC_otg controller
1587 * @chan: Information needed to initialize the host channel
1588 *
1589 * This function should only be called in Slave mode. The Do Ping bit is set in
1590 * the HCTSIZ register, then the channel is enabled.
1591 */
1592void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1593{
1594 u32 hcchar;
1595 u32 hctsiz;
1596
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001597 if (dbg_hc(chan))
1598 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1599 chan->hc_num);
1600
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001601
1602 hctsiz = TSIZ_DOPNG;
1603 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
1604 writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
1605
1606 hcchar = readl(hsotg->regs + HCCHAR(chan->hc_num));
1607 hcchar |= HCCHAR_CHENA;
1608 hcchar &= ~HCCHAR_CHDIS;
1609 writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
1610}
1611
1612/**
1613 * dwc2_calc_frame_interval() - Calculates the correct frame interval value for
1614 * the HFIR register according to PHY type and speed
1615 *
1616 * @hsotg: Programming view of DWC_otg controller
1617 *
1618 * NOTE: The caller can modify the value of the HFIR register only after the
1619 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
1620 * has been set
1621 */
1622u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
1623{
1624 u32 usbcfg;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001625 u32 hprt0;
1626 int clock = 60; /* default value */
1627
1628 usbcfg = readl(hsotg->regs + GUSBCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001629 hprt0 = readl(hsotg->regs + HPRT0);
1630
1631 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
1632 !(usbcfg & GUSBCFG_PHYIF16))
1633 clock = 60;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02001634 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001635 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
1636 clock = 48;
1637 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1638 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1639 clock = 30;
1640 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1641 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
1642 clock = 60;
1643 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
1644 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
1645 clock = 48;
1646 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02001647 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001648 clock = 48;
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02001649 if ((usbcfg & GUSBCFG_PHYSEL) &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02001650 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001651 clock = 48;
1652
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02001653 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001654 /* High speed case */
1655 return 125 * clock;
1656 else
1657 /* FS/LS case */
1658 return 1000 * clock;
1659}
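
/*
 * Usage sketch (illustrative only): once the port is enabled, a caller could
 * reprogram the frame interval with the value computed above. HFIR_FRINT_MASK
 * and HFIR_FRINT_SHIFT are assumed to be the frame interval field definitions
 * from hw.h:
 *
 *	u32 hfir = readl(hsotg->regs + HFIR);
 *
 *	hfir &= ~HFIR_FRINT_MASK;
 *	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
 *		HFIR_FRINT_MASK;
 *	writel(hfir, hsotg->regs + HFIR);
 */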
1660
1661/**
1662 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
1663 * buffer
1664 *
1665 * @hsotg: Programming view of DWC_otg controller
1666 * @dest: Destination buffer for the packet
1667 * @bytes: Number of bytes to copy to the destination
1668 */
1669void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
1670{
1671 u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
1672 u32 *data_buf = (u32 *)dest;
1673 int word_count = (bytes + 3) / 4;
1674 int i;
1675
1676 /*
1677 * Todo: Account for the case where dest is not dword aligned. This
1678 * requires reading data from the FIFO into a u32 temp buffer, then
1679 * moving it into the data buffer.
1680 */
1681
1682 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
1683
1684 for (i = 0; i < word_count; i++, data_buf++)
1685 *data_buf = readl(fifo);
1686}
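
/*
 * One way the todo above could be addressed (a sketch only, not what the
 * driver currently does): bounce each FIFO word through a local variable so
 * that an unaligned destination is written with memcpy() and a trailing
 * partial word does not overrun the buffer:
 *
 *	u32 tmp;
 *	int remaining = bytes;
 *
 *	while (remaining > 0) {
 *		tmp = readl(fifo);
 *		memcpy(dest, &tmp, min(remaining, 4));
 *		dest += 4;
 *		remaining -= 4;
 *	}
 */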
1687
1688/**
1689 * dwc2_dump_host_registers() - Prints the host registers
1690 *
1691 * @hsotg: Programming view of DWC_otg controller
1692 *
1693 * NOTE: This function will be removed once the peripheral controller code
1694 * is integrated and the driver is stable
1695 */
1696void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
1697{
1698#ifdef DEBUG
1699 u32 __iomem *addr;
1700 int i;
1701
1702 dev_dbg(hsotg->dev, "Host Global Registers\n");
1703 addr = hsotg->regs + HCFG;
1704 dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
1705 (unsigned long)addr, readl(addr));
1706 addr = hsotg->regs + HFIR;
1707 dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
1708 (unsigned long)addr, readl(addr));
1709 addr = hsotg->regs + HFNUM;
1710 dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
1711 (unsigned long)addr, readl(addr));
1712 addr = hsotg->regs + HPTXSTS;
1713 dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
1714 (unsigned long)addr, readl(addr));
1715 addr = hsotg->regs + HAINT;
1716 dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
1717 (unsigned long)addr, readl(addr));
1718 addr = hsotg->regs + HAINTMSK;
1719 dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
1720 (unsigned long)addr, readl(addr));
1721 if (hsotg->core_params->dma_desc_enable > 0) {
1722 addr = hsotg->regs + HFLBADDR;
1723 dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
1724 (unsigned long)addr, readl(addr));
1725 }
1726
1727 addr = hsotg->regs + HPRT0;
1728 dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
1729 (unsigned long)addr, readl(addr));
1730
1731 for (i = 0; i < hsotg->core_params->host_channels; i++) {
1732 dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
1733 addr = hsotg->regs + HCCHAR(i);
1734 dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
1735 (unsigned long)addr, readl(addr));
1736 addr = hsotg->regs + HCSPLT(i);
1737 dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
1738 (unsigned long)addr, readl(addr));
1739 addr = hsotg->regs + HCINT(i);
1740 dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
1741 (unsigned long)addr, readl(addr));
1742 addr = hsotg->regs + HCINTMSK(i);
1743 dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
1744 (unsigned long)addr, readl(addr));
1745 addr = hsotg->regs + HCTSIZ(i);
1746 dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
1747 (unsigned long)addr, readl(addr));
1748 addr = hsotg->regs + HCDMA(i);
1749 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
1750 (unsigned long)addr, readl(addr));
1751 if (hsotg->core_params->dma_desc_enable > 0) {
1752 addr = hsotg->regs + HCDMAB(i);
1753 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
1754 (unsigned long)addr, readl(addr));
1755 }
1756 }
1757#endif
1758}
1759
1760/**
1761 * dwc2_dump_global_registers() - Prints the core global registers
1762 *
1763 * @hsotg: Programming view of DWC_otg controller
1764 *
1765 * NOTE: This function will be removed once the peripheral controller code
1766 * is integrated and the driver is stable
1767 */
1768void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
1769{
1770#ifdef DEBUG
1771 u32 __iomem *addr;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001772
1773 dev_dbg(hsotg->dev, "Core Global Registers\n");
1774 addr = hsotg->regs + GOTGCTL;
1775 dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
1776 (unsigned long)addr, readl(addr));
1777 addr = hsotg->regs + GOTGINT;
1778 dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
1779 (unsigned long)addr, readl(addr));
1780 addr = hsotg->regs + GAHBCFG;
1781 dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
1782 (unsigned long)addr, readl(addr));
1783 addr = hsotg->regs + GUSBCFG;
1784 dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
1785 (unsigned long)addr, readl(addr));
1786 addr = hsotg->regs + GRSTCTL;
1787 dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
1788 (unsigned long)addr, readl(addr));
1789 addr = hsotg->regs + GINTSTS;
1790 dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
1791 (unsigned long)addr, readl(addr));
1792 addr = hsotg->regs + GINTMSK;
1793 dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
1794 (unsigned long)addr, readl(addr));
1795 addr = hsotg->regs + GRXSTSR;
1796 dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
1797 (unsigned long)addr, readl(addr));
1798 addr = hsotg->regs + GRXFSIZ;
1799 dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
1800 (unsigned long)addr, readl(addr));
1801 addr = hsotg->regs + GNPTXFSIZ;
1802 dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
1803 (unsigned long)addr, readl(addr));
1804 addr = hsotg->regs + GNPTXSTS;
1805 dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
1806 (unsigned long)addr, readl(addr));
1807 addr = hsotg->regs + GI2CCTL;
1808 dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
1809 (unsigned long)addr, readl(addr));
1810 addr = hsotg->regs + GPVNDCTL;
1811 dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
1812 (unsigned long)addr, readl(addr));
1813 addr = hsotg->regs + GGPIO;
1814 dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
1815 (unsigned long)addr, readl(addr));
1816 addr = hsotg->regs + GUID;
1817 dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
1818 (unsigned long)addr, readl(addr));
1819 addr = hsotg->regs + GSNPSID;
1820 dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
1821 (unsigned long)addr, readl(addr));
1822 addr = hsotg->regs + GHWCFG1;
1823 dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
1824 (unsigned long)addr, readl(addr));
1825 addr = hsotg->regs + GHWCFG2;
1826 dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
1827 (unsigned long)addr, readl(addr));
1828 addr = hsotg->regs + GHWCFG3;
1829 dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
1830 (unsigned long)addr, readl(addr));
1831 addr = hsotg->regs + GHWCFG4;
1832 dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
1833 (unsigned long)addr, readl(addr));
1834 addr = hsotg->regs + GLPMCFG;
1835 dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
1836 (unsigned long)addr, readl(addr));
1837 addr = hsotg->regs + GPWRDN;
1838 dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
1839 (unsigned long)addr, readl(addr));
1840 addr = hsotg->regs + GDFIFOCFG;
1841 dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
1842 (unsigned long)addr, readl(addr));
1843 addr = hsotg->regs + HPTXFSIZ;
1844 dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
1845 (unsigned long)addr, readl(addr));
1846
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001847 addr = hsotg->regs + PCGCTL;
1848 dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
1849 (unsigned long)addr, readl(addr));
1850#endif
1851}
1852
1853/**
1854 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
1855 *
1856 * @hsotg: Programming view of DWC_otg controller
1857 * @num: Tx FIFO to flush
1858 */
1859void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
1860{
1861 u32 greset;
1862 int count = 0;
1863
1864 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
1865
1866 greset = GRSTCTL_TXFFLSH;
1867 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
1868 writel(greset, hsotg->regs + GRSTCTL);
1869
1870 do {
1871 greset = readl(hsotg->regs + GRSTCTL);
1872 if (++count > 10000) {
1873 dev_warn(hsotg->dev,
1874 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
1875 __func__, greset,
1876 readl(hsotg->regs + GNPTXSTS));
1877 break;
1878 }
1879 udelay(1);
1880 } while (greset & GRSTCTL_TXFFLSH);
1881
1882 /* Wait for at least 3 PHY Clocks */
1883 udelay(1);
1884}
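
/*
 * Usage sketch (illustrative only): @num follows the GRSTCTL TxFNum field
 * encoding, so 0 selects the non-periodic Tx FIFO and 0x10 asks the core to
 * flush all Tx FIFOs at once:
 *
 *	dwc2_flush_tx_fifo(hsotg, 0);
 *	dwc2_flush_tx_fifo(hsotg, 0x10);
 */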
1885
1886/**
1887 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
1888 *
1889 * @hsotg: Programming view of DWC_otg controller
1890 */
1891void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
1892{
1893 u32 greset;
1894 int count = 0;
1895
1896 dev_vdbg(hsotg->dev, "%s()\n", __func__);
1897
1898 greset = GRSTCTL_RXFFLSH;
1899 writel(greset, hsotg->regs + GRSTCTL);
1900
1901 do {
1902 greset = readl(hsotg->regs + GRSTCTL);
1903 if (++count > 10000) {
1904 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
1905 __func__, greset);
1906 break;
1907 }
1908 udelay(1);
1909 } while (greset & GRSTCTL_RXFFLSH);
1910
1911 /* Wait for at least 3 PHY Clocks */
1912 udelay(1);
1913}
1914
1915#define DWC2_PARAM_TEST(a, b, c) ((a) < (b) || (a) > (c))
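
/*
 * DWC2_PARAM_TEST(val, min, max) is non-zero when val lies outside the
 * inclusive range [min, max]. For example, DWC2_PARAM_TEST(2, 0, 1) is true,
 * so a value of 2 is rejected by the 0-or-1 parameter checks below.
 */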
1916
1917/* Parameter access functions */
1918int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
1919{
1920 int valid = 1;
1921 int retval = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001922
1923 switch (val) {
1924 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
Matthijs Kooijman9badec22013-08-30 18:45:21 +02001925 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001926 valid = 0;
1927 break;
1928 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
Matthijs Kooijman9badec22013-08-30 18:45:21 +02001929 switch (hsotg->hw_params.op_mode) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001930 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1931 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
1932 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
1933 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
1934 break;
1935 default:
1936 valid = 0;
1937 break;
1938 }
1939 break;
1940 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
1941 /* always valid */
1942 break;
1943 default:
1944 valid = 0;
1945 break;
1946 }
1947
1948 if (!valid) {
1949 if (val >= 0)
1950 dev_err(hsotg->dev,
1951 "%d invalid for otg_cap parameter. Check HW configuration.\n",
1952 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02001953 switch (hsotg->hw_params.op_mode) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001954 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
1955 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
1956 break;
1957 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
1958 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
1959 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
1960 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
1961 break;
1962 default:
1963 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
1964 break;
1965 }
1966 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
1967 retval = -EINVAL;
1968 }
1969
1970 hsotg->core_params->otg_cap = val;
1971 return retval;
1972}
1973
1974int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
1975{
1976 int valid = 1;
1977 int retval = 0;
1978
Matthijs Kooijman9badec22013-08-30 18:45:21 +02001979 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001980 valid = 0;
1981 if (val < 0)
1982 valid = 0;
1983
1984 if (!valid) {
1985 if (val >= 0)
1986 dev_err(hsotg->dev,
1987 "%d invalid for dma_enable parameter. Check HW configuration.\n",
1988 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02001989 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001990 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
1991 retval = -EINVAL;
1992 }
1993
1994 hsotg->core_params->dma_enable = val;
1995 return retval;
1996}
1997
1998int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
1999{
2000 int valid = 1;
2001 int retval = 0;
2002
2003 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002004 !hsotg->hw_params.dma_desc_enable))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002005 valid = 0;
2006 if (val < 0)
2007 valid = 0;
2008
2009 if (!valid) {
2010 if (val >= 0)
2011 dev_err(hsotg->dev,
2012 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2013 val);
2014 val = (hsotg->core_params->dma_enable > 0 &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002015 hsotg->hw_params.dma_desc_enable);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002016 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
2017 retval = -EINVAL;
2018 }
2019
2020 hsotg->core_params->dma_desc_enable = val;
2021 return retval;
2022}
2023
2024int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2025 int val)
2026{
2027 int retval = 0;
2028
2029 if (DWC2_PARAM_TEST(val, 0, 1)) {
2030 if (val >= 0) {
2031 dev_err(hsotg->dev,
2032 				"Wrong value for host_support_fs_ls_low_power\n");
2033 			dev_err(hsotg->dev,
2034 				"host_support_fs_ls_low_power must be 0 or 1\n");
2035 }
2036 val = 0;
2037 dev_dbg(hsotg->dev,
2038 			"Setting host_support_fs_ls_low_power to %d\n", val);
2039 retval = -EINVAL;
2040 }
2041
2042 hsotg->core_params->host_support_fs_ls_low_power = val;
2043 return retval;
2044}
2045
2046int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
2047{
2048 int valid = 1;
2049 int retval = 0;
2050
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002051 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002052 valid = 0;
2053 if (val < 0)
2054 valid = 0;
2055
2056 if (!valid) {
2057 if (val >= 0)
2058 dev_err(hsotg->dev,
2059 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2060 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002061 val = hsotg->hw_params.enable_dynamic_fifo;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002062 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
2063 retval = -EINVAL;
2064 }
2065
2066 hsotg->core_params->enable_dynamic_fifo = val;
2067 return retval;
2068}
2069
2070int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2071{
2072 int valid = 1;
2073 int retval = 0;
2074
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002075 if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002076 valid = 0;
2077
2078 if (!valid) {
2079 if (val >= 0)
2080 dev_err(hsotg->dev,
2081 "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2082 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002083 val = hsotg->hw_params.host_rx_fifo_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002084 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
2085 retval = -EINVAL;
2086 }
2087
2088 hsotg->core_params->host_rx_fifo_size = val;
2089 return retval;
2090}
2091
2092int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2093{
2094 int valid = 1;
2095 int retval = 0;
2096
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002097 if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002098 valid = 0;
2099
2100 if (!valid) {
2101 if (val >= 0)
2102 dev_err(hsotg->dev,
2103 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2104 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002105 val = hsotg->hw_params.host_nperio_tx_fifo_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002106 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2107 val);
2108 retval = -EINVAL;
2109 }
2110
2111 hsotg->core_params->host_nperio_tx_fifo_size = val;
2112 return retval;
2113}
2114
2115int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
2116{
2117 int valid = 1;
2118 int retval = 0;
2119
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002120 if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002121 valid = 0;
2122
2123 if (!valid) {
2124 if (val >= 0)
2125 dev_err(hsotg->dev,
2126 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2127 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002128 val = hsotg->hw_params.host_perio_tx_fifo_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002129 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2130 val);
2131 retval = -EINVAL;
2132 }
2133
2134 hsotg->core_params->host_perio_tx_fifo_size = val;
2135 return retval;
2136}
2137
2138int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
2139{
2140 int valid = 1;
2141 int retval = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002142
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002143 if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002144 valid = 0;
2145
2146 if (!valid) {
2147 if (val >= 0)
2148 dev_err(hsotg->dev,
2149 "%d invalid for max_transfer_size. Check HW configuration.\n",
2150 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002151 val = hsotg->hw_params.max_transfer_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002152 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
2153 retval = -EINVAL;
2154 }
2155
2156 hsotg->core_params->max_transfer_size = val;
2157 return retval;
2158}
2159
2160int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
2161{
2162 int valid = 1;
2163 int retval = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002164
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002165 if (val < 15 || val > hsotg->hw_params.max_packet_count)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002166 valid = 0;
2167
2168 if (!valid) {
2169 if (val >= 0)
2170 dev_err(hsotg->dev,
2171 "%d invalid for max_packet_count. Check HW configuration.\n",
2172 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002173 val = hsotg->hw_params.max_packet_count;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002174 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
2175 retval = -EINVAL;
2176 }
2177
2178 hsotg->core_params->max_packet_count = val;
2179 return retval;
2180}
2181
2182int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
2183{
2184 int valid = 1;
2185 int retval = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002186
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002187 if (val < 1 || val > hsotg->hw_params.host_channels)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002188 valid = 0;
2189
2190 if (!valid) {
2191 if (val >= 0)
2192 dev_err(hsotg->dev,
2193 "%d invalid for host_channels. Check HW configuration.\n",
2194 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002195 val = hsotg->hw_params.host_channels;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002196 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
2197 retval = -EINVAL;
2198 }
2199
2200 hsotg->core_params->host_channels = val;
2201 return retval;
2202}
2203
2204int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
2205{
2206#ifndef NO_FS_PHY_HW_CHECKS
2207 int valid = 0;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002208 u32 hs_phy_type, fs_phy_type;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002209#endif
2210 int retval = 0;
2211
2212 if (DWC2_PARAM_TEST(val, DWC2_PHY_TYPE_PARAM_FS,
2213 DWC2_PHY_TYPE_PARAM_ULPI)) {
2214 if (val >= 0) {
2215 dev_err(hsotg->dev, "Wrong value for phy_type\n");
2216 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2217 }
2218
2219#ifndef NO_FS_PHY_HW_CHECKS
2220 valid = 0;
2221#else
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002222 val = DWC2_PHY_TYPE_PARAM_FS;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002223 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2224 retval = -EINVAL;
2225#endif
2226 }
2227
2228#ifndef NO_FS_PHY_HW_CHECKS
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002229 hs_phy_type = hsotg->hw_params.hs_phy_type;
2230 fs_phy_type = hsotg->hw_params.fs_phy_type;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002231 if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2232 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2233 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2234 valid = 1;
2235 else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2236 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2237 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2238 valid = 1;
2239 else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2240 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2241 valid = 1;
2242
2243 if (!valid) {
2244 if (val >= 0)
2245 dev_err(hsotg->dev,
2246 "%d invalid for phy_type. Check HW configuration.\n",
2247 val);
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002248 val = DWC2_PHY_TYPE_PARAM_FS;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002249 if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2250 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2251 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2252 val = DWC2_PHY_TYPE_PARAM_UTMI;
2253 else
2254 val = DWC2_PHY_TYPE_PARAM_ULPI;
2255 }
2256 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
2257 retval = -EINVAL;
2258 }
2259#endif
2260
2261 hsotg->core_params->phy_type = val;
2262 return retval;
2263}
2264
2265static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2266{
2267 return hsotg->core_params->phy_type;
2268}
2269
2270int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
2271{
2272 int valid = 1;
2273 int retval = 0;
2274
2275 if (DWC2_PARAM_TEST(val, 0, 1)) {
2276 if (val >= 0) {
2277 dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2278 			dev_err(hsotg->dev, "speed parameter must be 0 or 1\n");
2279 }
2280 valid = 0;
2281 }
2282
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002283 if (val == DWC2_SPEED_PARAM_HIGH &&
2284 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002285 valid = 0;
2286
2287 if (!valid) {
2288 if (val >= 0)
2289 dev_err(hsotg->dev,
2290 "%d invalid for speed parameter. Check HW configuration.\n",
2291 val);
2292 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002293 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002294 dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
2295 retval = -EINVAL;
2296 }
2297
2298 hsotg->core_params->speed = val;
2299 return retval;
2300}
2301
2302int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
2303{
2304 int valid = 1;
2305 int retval = 0;
2306
2307 if (DWC2_PARAM_TEST(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2308 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
2309 if (val >= 0) {
2310 dev_err(hsotg->dev,
2311 "Wrong value for host_ls_low_power_phy_clk parameter\n");
2312 dev_err(hsotg->dev,
2313 "host_ls_low_power_phy_clk must be 0 or 1\n");
2314 }
2315 valid = 0;
2316 }
2317
2318 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2319 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2320 valid = 0;
2321
2322 if (!valid) {
2323 if (val >= 0)
2324 dev_err(hsotg->dev,
2325 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2326 val);
2327 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2328 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2329 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2330 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2331 val);
2332 retval = -EINVAL;
2333 }
2334
2335 hsotg->core_params->host_ls_low_power_phy_clk = val;
2336 return retval;
2337}
2338
2339int dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
2340{
2341 int retval = 0;
2342
2343 if (DWC2_PARAM_TEST(val, 0, 1)) {
2344 if (val >= 0) {
2345 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2346 			dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
2347 }
2348 val = 0;
2349 		dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
2350 retval = -EINVAL;
2351 }
2352
2353 hsotg->core_params->phy_ulpi_ddr = val;
2354 return retval;
2355}
2356
2357int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
2358{
2359 int retval = 0;
2360
2361 if (DWC2_PARAM_TEST(val, 0, 1)) {
2362 if (val >= 0) {
2363 dev_err(hsotg->dev,
2364 "Wrong value for phy_ulpi_ext_vbus\n");
2365 dev_err(hsotg->dev,
2366 "phy_ulpi_ext_vbus must be 0 or 1\n");
2367 }
2368 val = 0;
2369 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
2370 retval = -EINVAL;
2371 }
2372
2373 hsotg->core_params->phy_ulpi_ext_vbus = val;
2374 return retval;
2375}
2376
2377int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
2378{
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002379 int valid = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002380 int retval = 0;
2381
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002382 switch (hsotg->hw_params.utmi_phy_data_width) {
2383 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2384 valid = (val == 8);
2385 break;
2386 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2387 valid = (val == 16);
2388 break;
2389 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2390 valid = (val == 8 || val == 16);
2391 break;
2392 }
2393
2394 if (!valid) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002395 if (val >= 0) {
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002396 dev_err(hsotg->dev,
2397 "%d invalid for phy_utmi_width. Check HW configuration.\n",
2398 val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002399 }
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002400 val = (hsotg->hw_params.utmi_phy_data_width ==
2401 GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002402 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
2403 retval = -EINVAL;
2404 }
2405
2406 hsotg->core_params->phy_utmi_width = val;
2407 return retval;
2408}
2409
2410int dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
2411{
2412 int retval = 0;
2413
2414 if (DWC2_PARAM_TEST(val, 0, 1)) {
2415 if (val >= 0) {
2416 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2417 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2418 }
2419 val = 0;
2420 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
2421 retval = -EINVAL;
2422 }
2423
2424 hsotg->core_params->ulpi_fs_ls = val;
2425 return retval;
2426}
2427
2428int dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
2429{
2430 int retval = 0;
2431
2432 if (DWC2_PARAM_TEST(val, 0, 1)) {
2433 if (val >= 0) {
2434 dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2435 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2436 }
2437 val = 0;
2438 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
2439 retval = -EINVAL;
2440 }
2441
2442 hsotg->core_params->ts_dline = val;
2443 return retval;
2444}
2445
2446int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
2447{
2448#ifndef NO_FS_PHY_HW_CHECKS
2449 int valid = 1;
2450#endif
2451 int retval = 0;
2452
2453 if (DWC2_PARAM_TEST(val, 0, 1)) {
2454 if (val >= 0) {
2455 dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2456 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2457 }
2458
2459#ifndef NO_FS_PHY_HW_CHECKS
2460 valid = 0;
2461#else
2462 val = 0;
2463 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2464 retval = -EINVAL;
2465#endif
2466 }
2467
2468#ifndef NO_FS_PHY_HW_CHECKS
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002469 if (val == 1 && !(hsotg->hw_params.i2c_enable))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002470 valid = 0;
2471
2472 if (!valid) {
2473 if (val >= 0)
2474 dev_err(hsotg->dev,
2475 "%d invalid for i2c_enable. Check HW configuration.\n",
2476 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002477 val = hsotg->hw_params.i2c_enable;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002478 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
2479 retval = -EINVAL;
2480 }
2481#endif
2482
2483 hsotg->core_params->i2c_enable = val;
2484 return retval;
2485}
2486
2487int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
2488{
2489 int valid = 1;
2490 int retval = 0;
2491
2492 if (DWC2_PARAM_TEST(val, 0, 1)) {
2493 if (val >= 0) {
2494 dev_err(hsotg->dev,
2495 				"Wrong value for en_multiple_tx_fifo\n");
2496 dev_err(hsotg->dev,
2497 "en_multiple_tx_fifo must be 0 or 1\n");
2498 }
2499 valid = 0;
2500 }
2501
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002502 if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002503 valid = 0;
2504
2505 if (!valid) {
2506 if (val >= 0)
2507 dev_err(hsotg->dev,
2508 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2509 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002510 val = hsotg->hw_params.en_multiple_tx_fifo;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002511 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
2512 retval = -EINVAL;
2513 }
2514
2515 hsotg->core_params->en_multiple_tx_fifo = val;
2516 return retval;
2517}
2518
2519int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
2520{
2521 int valid = 1;
2522 int retval = 0;
2523
2524 if (DWC2_PARAM_TEST(val, 0, 1)) {
2525 if (val >= 0) {
2526 dev_err(hsotg->dev,
2527 "'%d' invalid for parameter reload_ctl\n", val);
2528 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2529 }
2530 valid = 0;
2531 }
2532
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002533 if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002534 valid = 0;
2535
2536 if (!valid) {
2537 if (val >= 0)
2538 dev_err(hsotg->dev,
2539 "%d invalid for parameter reload_ctl. Check HW configuration.\n",
2540 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002541 val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002542 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
2543 retval = -EINVAL;
2544 }
2545
2546 hsotg->core_params->reload_ctl = val;
2547 return retval;
2548}
2549
Paul Zimmerman4d3190e2013-07-16 12:22:12 -07002550int dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002551{
Paul Zimmerman4d3190e2013-07-16 12:22:12 -07002552 if (val != -1)
2553 hsotg->core_params->ahbcfg = val;
2554 else
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02002555 hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
2556 GAHBCFG_HBSTLEN_SHIFT;
Paul Zimmerman4d3190e2013-07-16 12:22:12 -07002557 return 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002558}
2559
2560int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
2561{
2562 int retval = 0;
2563
2564 if (DWC2_PARAM_TEST(val, 0, 1)) {
2565 if (val >= 0) {
2566 dev_err(hsotg->dev,
2567 "'%d' invalid for parameter otg_ver\n", val);
2568 dev_err(hsotg->dev,
2569 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2570 }
2571 val = 0;
2572 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
2573 retval = -EINVAL;
2574 }
2575
2576 hsotg->core_params->otg_ver = val;
2577 return retval;
2578}
2579
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002580/**
2581 * dwc2_get_hwparams() - During device initialization, read various
2582 * hardware configuration registers and interpret the contents.
2583 */
2584int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
2585{
2586 struct dwc2_hw_params *hw = &hsotg->hw_params;
2587 unsigned width;
2588 u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
2589 u32 hptxfsiz, grxfsiz, gnptxfsiz;
2590 u32 gusbcfg;
2591
2592 /*
2593 * Attempt to ensure this device is really a DWC_otg Controller.
2594 * Read and verify the GSNPSID register contents. The value should be
2595 * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3",
2596 * as in "OTG version 2.xx" or "OTG version 3.xx".
2597 */
2598 hw->snpsid = readl(hsotg->regs + GSNPSID);
2599 if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
2600 (hw->snpsid & 0xfffff000) != 0x4f543000) {
2601 dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
2602 hw->snpsid);
2603 return -ENODEV;
2604 }
2605
2606 dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
2607 hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
2608 hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
2609
2610 hwcfg1 = readl(hsotg->regs + GHWCFG1);
2611 hwcfg2 = readl(hsotg->regs + GHWCFG2);
2612 hwcfg3 = readl(hsotg->regs + GHWCFG3);
2613 hwcfg4 = readl(hsotg->regs + GHWCFG4);
2614 gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
2615 grxfsiz = readl(hsotg->regs + GRXFSIZ);
2616
2617 dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
2618 dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
2619 dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
2620 dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
2621 dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
2622 dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
2623
2624 /* Force host mode to get HPTXFSIZ exact power on value */
2625 gusbcfg = readl(hsotg->regs + GUSBCFG);
2626 gusbcfg |= GUSBCFG_FORCEHOSTMODE;
2627 writel(gusbcfg, hsotg->regs + GUSBCFG);
2628 usleep_range(100000, 150000);
2629
2630 hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
2631 dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
2632 gusbcfg = readl(hsotg->regs + GUSBCFG);
2633 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
2634 writel(gusbcfg, hsotg->regs + GUSBCFG);
2635 usleep_range(100000, 150000);
2636
2637 /* hwcfg2 */
2638 hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
2639 GHWCFG2_OP_MODE_SHIFT;
2640 hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
2641 GHWCFG2_ARCHITECTURE_SHIFT;
2642 hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
2643 hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
2644 GHWCFG2_NUM_HOST_CHAN_SHIFT);
2645 hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
2646 GHWCFG2_HS_PHY_TYPE_SHIFT;
2647 hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
2648 GHWCFG2_FS_PHY_TYPE_SHIFT;
2649 hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
2650 GHWCFG2_NUM_DEV_EP_SHIFT;
2651 hw->nperio_tx_q_depth =
2652 (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
2653 GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
2654 hw->host_perio_tx_q_depth =
2655 (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
2656 GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
2657 hw->dev_token_q_depth =
2658 (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
2659 GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
2660
2661 /* hwcfg3 */
2662 width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
2663 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
2664 hw->max_transfer_size = (1 << (width + 11)) - 1;
2665 width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
2666 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
2667 hw->max_packet_count = (1 << (width + 4)) - 1;
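	/*
	 * Worked example (actual values depend on the core configuration):
	 * a transfer size counter width field of 8 gives a maximum transfer
	 * size of (1 << 19) - 1 = 524287 bytes, and a packet size counter
	 * width field of 6 gives a maximum packet count of
	 * (1 << 10) - 1 = 1023 packets.
	 */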
2668 hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
2669 hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
2670 GHWCFG3_DFIFO_DEPTH_SHIFT;
2671
2672 /* hwcfg4 */
2673 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
2674 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
2675 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
2676 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
2677 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002678 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
2679 GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002680
2681 /* fifo sizes */
2682 hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
2683 GRXFSIZ_DEPTH_SHIFT;
2684 hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2685 FIFOSIZE_DEPTH_SHIFT;
2686 hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
2687 FIFOSIZE_DEPTH_SHIFT;
2688
2689 dev_dbg(hsotg->dev, "Detected values from hardware:\n");
2690 dev_dbg(hsotg->dev, " op_mode=%d\n",
2691 hw->op_mode);
2692 dev_dbg(hsotg->dev, " arch=%d\n",
2693 hw->arch);
2694 dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
2695 hw->dma_desc_enable);
2696 dev_dbg(hsotg->dev, " power_optimized=%d\n",
2697 hw->power_optimized);
2698 dev_dbg(hsotg->dev, " i2c_enable=%d\n",
2699 hw->i2c_enable);
2700 dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
2701 hw->hs_phy_type);
2702 dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
2703 hw->fs_phy_type);
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002704	dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
2705 hw->utmi_phy_data_width);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002706 dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
2707 hw->num_dev_ep);
2708 dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
2709 hw->num_dev_perio_in_ep);
2710 dev_dbg(hsotg->dev, " host_channels=%d\n",
2711 hw->host_channels);
2712 dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
2713 hw->max_transfer_size);
2714 dev_dbg(hsotg->dev, " max_packet_count=%d\n",
2715 hw->max_packet_count);
2716 dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
2717 hw->nperio_tx_q_depth);
2718 dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
2719 hw->host_perio_tx_q_depth);
2720 dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
2721 hw->dev_token_q_depth);
2722 dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
2723 hw->enable_dynamic_fifo);
2724 dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
2725 hw->en_multiple_tx_fifo);
2726 dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
2727 hw->total_fifo_size);
2728 dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n",
2729 hw->host_rx_fifo_size);
2730 dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
2731 hw->host_nperio_tx_fifo_size);
2732 dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
2733 hw->host_perio_tx_fifo_size);
2734 dev_dbg(hsotg->dev, "\n");
2735
2736 return 0;
2737}
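
/*
 * Call-order sketch (illustrative only): the dwc2_set_param_*() helpers above
 * validate requested values against hsotg->hw_params, so a probe path is
 * expected to read the hardware configuration first and only then apply the
 * platform's parameters via dwc2_set_parameters() below:
 *
 *	retval = dwc2_get_hwparams(hsotg);
 *	if (retval)
 *		return retval;
 *	retval = dwc2_set_parameters(hsotg, params);
 */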
2738
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002739/*
2740 * This function is called during module initialization to pass module parameters
2741 * for the DWC_otg core. It returns non-0 if any parameters are invalid.
2742 */
2743int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
Stephen Warren90dbcea2013-04-29 19:49:08 +00002744 const struct dwc2_core_params *params)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002745{
2746 int retval = 0;
2747
2748 dev_dbg(hsotg->dev, "%s()\n", __func__);
2749
2750 retval |= dwc2_set_param_otg_cap(hsotg, params->otg_cap);
2751 retval |= dwc2_set_param_dma_enable(hsotg, params->dma_enable);
2752 retval |= dwc2_set_param_dma_desc_enable(hsotg,
2753 params->dma_desc_enable);
2754 retval |= dwc2_set_param_host_support_fs_ls_low_power(hsotg,
2755 params->host_support_fs_ls_low_power);
2756 retval |= dwc2_set_param_enable_dynamic_fifo(hsotg,
2757 params->enable_dynamic_fifo);
2758 retval |= dwc2_set_param_host_rx_fifo_size(hsotg,
2759 params->host_rx_fifo_size);
2760 retval |= dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
2761 params->host_nperio_tx_fifo_size);
2762 retval |= dwc2_set_param_host_perio_tx_fifo_size(hsotg,
2763 params->host_perio_tx_fifo_size);
2764 retval |= dwc2_set_param_max_transfer_size(hsotg,
2765 params->max_transfer_size);
2766 retval |= dwc2_set_param_max_packet_count(hsotg,
2767 params->max_packet_count);
2768 retval |= dwc2_set_param_host_channels(hsotg, params->host_channels);
2769 retval |= dwc2_set_param_phy_type(hsotg, params->phy_type);
2770 retval |= dwc2_set_param_speed(hsotg, params->speed);
2771 retval |= dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
2772 params->host_ls_low_power_phy_clk);
2773 retval |= dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
2774 retval |= dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
2775 params->phy_ulpi_ext_vbus);
2776 retval |= dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
2777 retval |= dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
2778 retval |= dwc2_set_param_ts_dline(hsotg, params->ts_dline);
2779 retval |= dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
2780 retval |= dwc2_set_param_en_multiple_tx_fifo(hsotg,
2781 params->en_multiple_tx_fifo);
2782 retval |= dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
Paul Zimmerman4d3190e2013-07-16 12:22:12 -07002783 retval |= dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002784 retval |= dwc2_set_param_otg_ver(hsotg, params->otg_ver);
2785
2786 return retval;
2787}
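
/*
 * Usage sketch (illustrative only, the values are made up): a bus glue driver
 * typically fills a struct dwc2_core_params and hands it to
 * dwc2_set_parameters() from its probe routine. Any value the hardware cannot
 * support is replaced with a usable default and reported through the return
 * value:
 *
 *	static const struct dwc2_core_params params_example = {
 *		.otg_cap	 = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
 *		.dma_enable	 = 1,
 *		.dma_desc_enable = 0,
 *		.speed		 = DWC2_SPEED_PARAM_HIGH,
 *		.host_channels	 = 8,
 *		.phy_type	 = DWC2_PHY_TYPE_PARAM_UTMI,
 *		... remaining fields omitted for brevity ...
 *	};
 *
 *	if (dwc2_set_parameters(hsotg, &params_example))
 *		dev_warn(hsotg->dev, "some parameters were adjusted\n");
 */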
2788
2789u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
2790{
2791 return (u16)(hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103);
2792}
2793
2794int dwc2_check_core_status(struct dwc2_hsotg *hsotg)
2795{
2796 if (readl(hsotg->regs + GSNPSID) == 0xffffffff)
2797 return -1;
2798 else
2799 return 0;
2800}
2801
2802/**
2803 * dwc2_enable_global_interrupts() - Enables the controller's Global
2804 * Interrupt in the AHB Config register
2805 *
2806 * @hsotg: Programming view of DWC_otg controller
2807 */
2808void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
2809{
2810 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2811
2812 ahbcfg |= GAHBCFG_GLBL_INTR_EN;
2813 writel(ahbcfg, hsotg->regs + GAHBCFG);
2814}
2815
2816/**
2817 * dwc2_disable_global_interrupts() - Disables the controller's Global
2818 * Interrupt in the AHB Config register
2819 *
2820 * @hsotg: Programming view of DWC_otg controller
2821 */
2822void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
2823{
2824 u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
2825
2826 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
2827 writel(ahbcfg, hsotg->regs + GAHBCFG);
2828}
2829
2830MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
2831MODULE_AUTHOR("Synopsys, Inc.");
2832MODULE_LICENSE("Dual BSD/GPL");