/*
 * core.c - DesignWare HS OTG Controller common routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The Core code provides basic services for accessing and managing the
 * DWC_otg hardware. These services are used by both the Host Controller
 * Driver and the Peripheral Controller Driver.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_host_registers() - Backup controller host registers.
 * When suspending the USB bus, the registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup Host regs */
	hr = &hsotg->hr_backup;
	hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
	hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));

	hr->hprt0 = dwc2_readl(hsotg->regs + HPRT0);
	hr->hfir = dwc2_readl(hsotg->regs + HFIR);
	hr->valid = true;

	return 0;
}

/**
 * dwc2_restore_host_registers() - Restore controller host registers.
 * When resuming the USB bus, the host registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore host regs */
	hr = &hsotg->hr_backup;
	if (!hr->valid) {
		dev_err(hsotg->dev, "%s: no host registers to restore\n",
			__func__);
		return -EINVAL;
	}
	hr->valid = false;

	dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
	dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);

	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));

	dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
	dwc2_writel(hr->hfir, hsotg->regs + HFIR);

	return 0;
}
#else
static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif

#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_device_registers() - Backup controller device registers.
 * When suspending the USB bus, the registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup dev regs */
	dr = &hsotg->dr_backup;

	dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
	dr->dctl = dwc2_readl(hsotg->regs + DCTL);
	dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
	dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
	dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Backup IN EPs */
		dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->diepctl[i] & DXEPCTL_DPID)
			dr->diepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->diepctl[i] |= DXEPCTL_SETD0PID;

		dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
		dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));

		/* Backup OUT EPs */
		dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->doepctl[i] & DXEPCTL_DPID)
			dr->doepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->doepctl[i] |= DXEPCTL_SETD0PID;

		dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
		dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
	}
	dr->valid = true;
	return 0;
}

/**
 * dwc2_restore_device_registers() - Restore controller device registers.
 * When resuming the USB bus, the device registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	u32 dctl;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore dev regs */
	dr = &hsotg->dr_backup;
	if (!dr->valid) {
		dev_err(hsotg->dev, "%s: no device registers to restore\n",
			__func__);
		return -EINVAL;
	}
	dr->valid = false;

	dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
	dwc2_writel(dr->dctl, hsotg->regs + DCTL);
	dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
	dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
	dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Restore IN EPs */
		dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
		dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
		dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));

		/* Restore OUT EPs */
		dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
		dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
		dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
	}

	/* Set the Power-On Programming done bit */
	dctl = dwc2_readl(hsotg->regs + DCTL);
	dctl |= DCTL_PWRONPRGDONE;
	dwc2_writel(dctl, hsotg->regs + DCTL);

	return 0;
}
#else
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif

/**
 * dwc2_backup_global_registers() - Backup global controller registers.
 * When suspending the USB bus, the registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	/* Backup global regs */
	gr = &hsotg->gr_backup;

	gr->gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	gr->gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
	gr->gahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
	gr->gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	gr->grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
	gr->gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
	gr->hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
	gr->gdfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		gr->dtxfsiz[i] = dwc2_readl(hsotg->regs + DPTXFSIZN(i));

	gr->valid = true;
	return 0;
}

/**
 * dwc2_restore_global_registers() - Restore controller global registers.
 * When resuming the USB bus, the global registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore global regs */
	gr = &hsotg->gr_backup;
	if (!gr->valid) {
		dev_err(hsotg->dev, "%s: no global registers to restore\n",
			__func__);
		return -EINVAL;
	}
	gr->valid = false;

	dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
	dwc2_writel(gr->gotgctl, hsotg->regs + GOTGCTL);
	dwc2_writel(gr->gintmsk, hsotg->regs + GINTMSK);
	dwc2_writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
	dwc2_writel(gr->gahbcfg, hsotg->regs + GAHBCFG);
	dwc2_writel(gr->grxfsiz, hsotg->regs + GRXFSIZ);
	dwc2_writel(gr->gnptxfsiz, hsotg->regs + GNPTXFSIZ);
	dwc2_writel(gr->hptxfsiz, hsotg->regs + HPTXFSIZ);
	dwc2_writel(gr->gdfifocfg, hsotg->regs + GDFIFOCFG);
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		dwc2_writel(gr->dtxfsiz[i], hsotg->regs + DPTXFSIZN(i));

	return 0;
}

/**
 * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @restore: Controller registers need to be restored
 */
int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
{
	u32 pcgcctl;
	int ret = 0;

	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_STOPPCLK;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_PWRCLMP;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	udelay(100);
	if (restore) {
		ret = dwc2_restore_global_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to restore registers\n",
				__func__);
			return ret;
		}
		if (dwc2_is_host_mode(hsotg)) {
			ret = dwc2_restore_host_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore host registers\n",
					__func__);
				return ret;
			}
		} else {
			ret = dwc2_restore_device_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore device registers\n",
					__func__);
				return ret;
			}
		}
	}

	return ret;
}

/**
 * dwc2_enter_hibernation() - Put controller in Partial Power Down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
{
	u32 pcgcctl;
	int ret = 0;

	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	/* Backup all registers */
	ret = dwc2_backup_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
			__func__);
		return ret;
	}

	if (dwc2_is_host_mode(hsotg)) {
		ret = dwc2_backup_host_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup host registers\n",
				__func__);
			return ret;
		}
	} else {
		ret = dwc2_backup_device_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup device registers\n",
				__func__);
			return ret;
		}
	}

	/* Put the controller in low power state */
	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);

	pcgcctl |= PCGCTL_PWRCLMP;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
	ndelay(20);

	pcgcctl |= PCGCTL_RSTPDWNMODULE;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
	ndelay(20);

	pcgcctl |= PCGCTL_STOPPCLK;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	return ret;
}
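
/*
 * Usage sketch (assumed caller, not shown in this file): a suspend path would
 * call dwc2_enter_hibernation() to back up the global plus host or device
 * registers and stop the PHY clock, and the matching resume path would call
 * dwc2_exit_hibernation(hsotg, true) so the backed-up state is written back
 * once controller power returns.
 */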

/**
 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
 * used in both device and host modes
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	/* Clear any pending OTG Interrupts */
	dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);

	/* Clear any pending interrupts */
	dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);

	/* Enable the interrupts in the GINTMSK */
	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;

	if (hsotg->core_params->dma_enable <= 0)
		intmsk |= GINTSTS_RXFLVL;
	if (hsotg->core_params->external_id_pin_ctl <= 0)
		intmsk |= GINTSTS_CONIDSTSCHNG;

	intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
		  GINTSTS_SESSREQINT;

	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * Initializes the FSLSPClkSel field of the HCFG register depending on the
 * PHY type
 */
static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, val;

	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	     hsotg->core_params->ulpi_fs_ls > 0) ||
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* Full speed PHY */
		val = HCFG_FSLSPCLKSEL_48_MHZ;
	} else {
		/* High speed PHY running at full speed or high speed */
		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
	}

	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
	hcfg = dwc2_readl(hsotg->regs + HCFG);
	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
	dwc2_writel(hcfg, hsotg->regs + HCFG);
}

/*
 * Do a soft reset of the core. Be careful with this because it
 * resets all the internal state machines of the core.
 */
static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;
	u32 gusbcfg;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Wait for AHB master IDLE state */
	do {
		usleep_range(20000, 40000);
		greset = dwc2_readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (!(greset & GRSTCTL_AHBIDLE));

	/* Core Soft Reset */
	count = 0;
	greset |= GRSTCTL_CSFTRST;
	dwc2_writel(greset, hsotg->regs + GRSTCTL);
	do {
		usleep_range(20000, 40000);
		greset = dwc2_readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (greset & GRSTCTL_CSFTRST);

	if (hsotg->dr_mode == USB_DR_MODE_HOST) {
		gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
		gusbcfg |= GUSBCFG_FORCEHOSTMODE;
		dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
	} else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
		gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
		gusbcfg |= GUSBCFG_FORCEDEVMODE;
		dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
	} else if (hsotg->dr_mode == USB_DR_MODE_OTG) {
		gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
		gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
		dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
	}

	/*
	 * NOTE: This long sleep is _very_ important, otherwise the core will
	 * not stay in host mode after a connector ID change!
	 */
	usleep_range(150000, 200000);

	return 0;
}
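
/*
 * Note on register access: the routines in this file follow the same
 * read-modify-write idiom against the memory-mapped register block, e.g.
 * (sketch only):
 *
 *	u32 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
 *	usbcfg |= GUSBCFG_PHYSEL;
 *	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
 *
 * dwc2_readl()/dwc2_writel() are the driver's own accessors, so platform
 * details such as endianness can be handled in one place rather than at
 * every call site.
 */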

static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, i2cctl;
	int retval = 0;

	/*
	 * core_init() is now called on every switch so only call the
	 * following for the first time through
	 */
	if (select_phy) {
		dev_dbg(hsotg->dev, "FS PHY selected\n");
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_PHYSEL;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Reset after a PHY select */
		retval = dwc2_core_reset(hsotg);
		if (retval) {
			dev_err(hsotg->dev, "%s() Reset failed, aborting",
				__func__);
			return retval;
		}
	}

	/*
	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
	 * do this on HNP Dev/Host mode switches (done in dev_init and
	 * host_init).
	 */
	if (dwc2_is_host_mode(hsotg))
		dwc2_init_fs_ls_pclk_sel(hsotg);

	if (hsotg->core_params->i2c_enable > 0) {
		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");

		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Program GI2CCTL.I2CEn */
		i2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
		i2cctl &= ~GI2CCTL_I2CEN;
		dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
		i2cctl |= GI2CCTL_I2CEN;
		dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
	}

	return retval;
}

static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg;
	int retval = 0;

	if (!select_phy)
		return 0;

	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);

	/*
	 * HS PHY parameters. These parameters are preserved during soft reset
	 * so only program the first time. Do a soft reset immediately after
	 * setting phyif.
	 */
	switch (hsotg->core_params->phy_type) {
	case DWC2_PHY_TYPE_PARAM_ULPI:
		/* ULPI interface */
		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
		if (hsotg->core_params->phy_ulpi_ddr > 0)
			usbcfg |= GUSBCFG_DDRSEL;
		break;
	case DWC2_PHY_TYPE_PARAM_UTMI:
		/* UTMI+ interface */
		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
		if (hsotg->core_params->phy_utmi_width == 16)
			usbcfg |= GUSBCFG_PHYIF16;
		break;
	default:
		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
		break;
	}

	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

	/* Reset after setting the PHY parameters */
	retval = dwc2_core_reset(hsotg);
	if (retval) {
		dev_err(hsotg->dev, "%s() Reset failed, aborting",
			__func__);
		return retval;
	}

	return retval;
}

static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg;
	int retval = 0;

	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* If FS mode with FS PHY */
		retval = dwc2_fs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	} else {
		/* High speed PHY */
		retval = dwc2_hs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	}

	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	    hsotg->core_params->ulpi_fs_ls > 0) {
		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_ULPI_FS_LS;
		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
	} else {
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
	}

	return retval;
}

static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);

	switch (hsotg->hw_params.arch) {
	case GHWCFG2_EXT_DMA_ARCH:
		dev_err(hsotg->dev, "External DMA Mode not supported\n");
		return -EINVAL;

	case GHWCFG2_INT_DMA_ARCH:
		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
		if (hsotg->core_params->ahbcfg != -1) {
			ahbcfg &= GAHBCFG_CTRL_MASK;
			ahbcfg |= hsotg->core_params->ahbcfg &
				  ~GAHBCFG_CTRL_MASK;
		}
		break;

	case GHWCFG2_SLAVE_ONLY_ARCH:
	default:
		dev_dbg(hsotg->dev, "Slave Only Mode\n");
		break;
	}

	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
		hsotg->core_params->dma_enable,
		hsotg->core_params->dma_desc_enable);

	if (hsotg->core_params->dma_enable > 0) {
		if (hsotg->core_params->dma_desc_enable > 0)
			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
		else
			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
	} else {
		dev_dbg(hsotg->dev, "Using Slave mode\n");
		hsotg->core_params->dma_desc_enable = 0;
	}

	if (hsotg->core_params->dma_enable > 0)
		ahbcfg |= GAHBCFG_DMA_EN;

	dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);

	return 0;
}

static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;

	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);

	switch (hsotg->hw_params.op_mode) {
	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
		if (hsotg->core_params->otg_cap ==
				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_HNPCAP;
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
	default:
		break;
	}

	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
}

/**
 * dwc2_core_init() - Initializes the DWC_otg controller registers and
 * prepares the core for device mode or host mode operation
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @select_phy: If true then also set the Phy type
 * @irq: If >= 0, the irq to register
 */
int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
{
	u32 usbcfg, otgctl;
	int retval;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);

	/* Set ULPI External VBUS bit if needed */
	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
	if (hsotg->core_params->phy_ulpi_ext_vbus ==
				DWC2_PHY_ULPI_EXTERNAL_VBUS)
		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;

	/* Set external TS Dline pulsing bit if needed */
	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
	if (hsotg->core_params->ts_dline > 0)
		usbcfg |= GUSBCFG_TERMSELDLPULSE;

	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

	/* Reset the Controller */
	retval = dwc2_core_reset(hsotg);
	if (retval) {
		dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
			__func__);
		return retval;
	}

	/*
	 * This needs to happen in FS mode before any other programming occurs
	 */
	retval = dwc2_phy_init(hsotg, select_phy);
	if (retval)
		return retval;

	/* Program the GAHBCFG Register */
	retval = dwc2_gahbcfg_init(hsotg);
	if (retval)
		return retval;

	/* Program the GUSBCFG register */
	dwc2_gusbcfg_init(hsotg);

	/* Program the GOTGCTL register */
	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_OTGVER;
	if (hsotg->core_params->otg_ver > 0)
		otgctl |= GOTGCTL_OTGVER;
	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);

	/* Clear the SRP success bit for FS-I2c */
	hsotg->srp_success = 0;

	/* Enable common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/*
	 * Do device or host initialization based on mode during PCD and
	 * HCD initialization
	 */
	if (dwc2_is_host_mode(hsotg)) {
		dev_dbg(hsotg->dev, "Host Mode\n");
		hsotg->op_state = OTG_STATE_A_HOST;
	} else {
		dev_dbg(hsotg->dev, "Device Mode\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
	}

	return 0;
}
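
/*
 * Call sketch (hypothetical caller): probe or connector-ID-change code would
 * invoke dwc2_core_init(hsotg, true, irq) the first time through so the PHY
 * is selected and the core soft-reset, and may pass select_phy == false on
 * later mode switches since the PHY parameters survive a soft reset (see
 * dwc2_fs_phy_init() and dwc2_hs_phy_init()).
 */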

/**
 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	/* Disable all interrupts */
	dwc2_writel(0, hsotg->regs + GINTMSK);
	dwc2_writel(0, hsotg->regs + HAINTMSK);

	/* Enable the common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/* Enable host mode interrupts without disturbing common interrupts */
	intmsk = dwc2_readl(hsotg->regs + GINTMSK);
	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
}

/**
 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK);

	/* Disable host mode interrupts without disturbing common interrupts */
	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP);
	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * dwc2_calculate_dynamic_fifo() - Calculates the default FIFO sizes
 * for systems that have a total FIFO depth that is smaller than the default
 * RX + TX FIFO size.
 *
 * @hsotg: Programming view of DWC_otg controller
 */
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;

	total_fifo_size = hw->total_fifo_size;
	rxfsiz = params->host_rx_fifo_size;
	nptxfsiz = params->host_nperio_tx_fifo_size;
	ptxfsiz = params->host_perio_tx_fifo_size;

	/*
	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
	 * allocation with support for high bandwidth endpoints. Synopsys
	 * defines MPS(Max Packet size) for a periodic EP=1024, and for
	 * non-periodic as 512.
	 */
	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
		/*
		 * For Buffer DMA mode/Scatter Gather DMA mode
		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
		 * with n = number of host channel.
		 * 2 * ((1024/4) + 2) = 516
		 */
		rxfsiz = 516 + hw->host_channels;

		/*
		 * min non-periodic tx fifo depth
		 * 2 * (largest non-periodic USB packet used / 4)
		 * 2 * (512/4) = 256
		 */
		nptxfsiz = 256;

		/*
		 * min periodic tx fifo depth
		 * (largest packet size*MC)/4
		 * (1024 * 3)/4 = 768
		 */
		ptxfsiz = 768;

		params->host_rx_fifo_size = rxfsiz;
		params->host_nperio_tx_fifo_size = nptxfsiz;
		params->host_perio_tx_fifo_size = ptxfsiz;
	}

	/*
	 * If the summation of RX, NPTX and PTX fifo sizes is still
	 * bigger than the total_fifo_size, then we have a problem.
	 *
	 * We won't be able to allocate as many endpoints. Right now,
	 * we're just printing an error message, but ideally this FIFO
	 * allocation algorithm would be improved in the future.
	 *
	 * FIXME improve this FIFO allocation algorithm.
	 */
	if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
		dev_err(hsotg->dev, "invalid fifo sizes\n");
}
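
/*
 * Worked example (illustrative numbers only): for a core with 16 host
 * channels, the fallback sizes above become rxfsiz = 516 + 16 = 532,
 * nptxfsiz = 256 and ptxfsiz = 768, i.e. 1556 words in total. A core whose
 * total_fifo_size is still smaller than that ends up in the
 * "invalid fifo sizes" case at the end of dwc2_calculate_dynamic_fifo().
 */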

static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;

	if (!params->enable_dynamic_fifo)
		return;

	dwc2_calculate_dynamic_fifo(hsotg);

	/* Rx FIFO */
	grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
	grxfsiz |= params->host_rx_fifo_size <<
		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
	dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + GRXFSIZ));

	/* Non-periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + GNPTXFSIZ));
	nptxfsiz = params->host_nperio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	nptxfsiz |= params->host_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + GNPTXFSIZ));

	/* Periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + HPTXFSIZ));
	hptxfsiz = params->host_perio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	hptxfsiz |= (params->host_rx_fifo_size +
		     params->host_nperio_tx_fifo_size) <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + HPTXFSIZ));

	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
		/*
		 * Global DFIFOCFG calculation for Host mode -
		 * include RxFIFO, NPTXFIFO and HPTXFIFO
		 */
		dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
		dfifocfg |= (params->host_rx_fifo_size +
			     params->host_nperio_tx_fifo_size +
			     params->host_perio_tx_fifo_size) <<
			    GDFIFOCFG_EPINFOBASE_SHIFT &
			    GDFIFOCFG_EPINFOBASE_MASK;
		dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG);
	}
}
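
/*
 * Resulting FIFO layout (sketch): the RxFIFO starts at offset 0, the
 * non-periodic TxFIFO starts at host_rx_fifo_size, and the periodic TxFIFO
 * starts at host_rx_fifo_size + host_nperio_tx_fifo_size, which is exactly
 * what the FIFOSIZE_STARTADDR fields programmed above encode; the same sum
 * is written to the GDFIFOCFG endpoint info base for cores that need it.
 */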
1003
1004/**
1005 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
1006 * Host mode
1007 *
1008 * @hsotg: Programming view of DWC_otg controller
1009 *
1010 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
1011 * request queues. Host channels are reset to ensure that they are ready for
1012 * performing transfers.
1013 */
1014void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
1015{
1016 u32 hcfg, hfir, otgctl;
1017
1018 dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
1019
1020 /* Restart the Phy Clock */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001021 dwc2_writel(0, hsotg->regs + PCGCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001022
1023 /* Initialize Host Configuration Register */
1024 dwc2_init_fs_ls_pclk_sel(hsotg);
1025 if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001026 hcfg = dwc2_readl(hsotg->regs + HCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001027 hcfg |= HCFG_FSLSSUPP;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001028 dwc2_writel(hcfg, hsotg->regs + HCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001029 }
1030
1031 /*
1032 * This bit allows dynamic reloading of the HFIR register during
Masanari Iida0dcde5082013-09-13 23:34:36 +09001033 * runtime. This bit needs to be programmed during initial configuration
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001034 * and its value must not be changed during runtime.
1035 */
1036 if (hsotg->core_params->reload_ctl > 0) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001037 hfir = dwc2_readl(hsotg->regs + HFIR);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001038 hfir |= HFIR_RLDCTRL;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001039 dwc2_writel(hfir, hsotg->regs + HFIR);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001040 }
1041
1042 if (hsotg->core_params->dma_desc_enable > 0) {
Matthijs Kooijman9badec22013-08-30 18:45:21 +02001043 u32 op_mode = hsotg->hw_params.op_mode;
1044 if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
1045 !hsotg->hw_params.dma_desc_enable ||
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001046 op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
1047 op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
1048 op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
1049 dev_err(hsotg->dev,
1050 "Hardware does not support descriptor DMA mode -\n");
1051 dev_err(hsotg->dev,
1052 "falling back to buffer DMA mode.\n");
1053 hsotg->core_params->dma_desc_enable = 0;
1054 } else {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001055 hcfg = dwc2_readl(hsotg->regs + HCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001056 hcfg |= HCFG_DESCDMA;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001057 dwc2_writel(hcfg, hsotg->regs + HCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001058 }
1059 }
1060
1061 /* Configure data FIFO sizes */
1062 dwc2_config_fifos(hsotg);
1063
1064 /* TODO - check this */
1065 /* Clear Host Set HNP Enable in the OTG Control Register */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001066 otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001067 otgctl &= ~GOTGCTL_HSTSETHNPEN;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001068 dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001069
1070 /* Make sure the FIFOs are flushed */
1071 dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
1072 dwc2_flush_rx_fifo(hsotg);
1073
1074 /* Clear Host Set HNP Enable in the OTG Control Register */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001075 otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001076 otgctl &= ~GOTGCTL_HSTSETHNPEN;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001077 dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001078
1079 if (hsotg->core_params->dma_desc_enable <= 0) {
1080 int num_channels, i;
1081 u32 hcchar;
1082
1083 /* Flush out any leftover queued requests */
1084 num_channels = hsotg->core_params->host_channels;
1085 for (i = 0; i < num_channels; i++) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001086 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001087 hcchar &= ~HCCHAR_CHENA;
1088 hcchar |= HCCHAR_CHDIS;
1089 hcchar &= ~HCCHAR_EPDIR;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001090 dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001091 }
1092
1093 /* Halt all channels to put them into a known state */
1094 for (i = 0; i < num_channels; i++) {
1095 int count = 0;
1096
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001097 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001098 hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
1099 hcchar &= ~HCCHAR_EPDIR;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001100 dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001101 dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
1102 __func__, i);
1103 do {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001104 hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001105 if (++count > 1000) {
1106 dev_err(hsotg->dev,
1107 "Unable to clear enable on channel %d\n",
1108 i);
1109 break;
1110 }
1111 udelay(1);
1112 } while (hcchar & HCCHAR_CHENA);
1113 }
1114 }
1115
1116 /* Turn on the vbus power */
1117 dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
1118 if (hsotg->op_state == OTG_STATE_A_HOST) {
1119 u32 hprt0 = dwc2_read_hprt0(hsotg);
1120
1121 dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
1122 !!(hprt0 & HPRT0_PWR));
1123 if (!(hprt0 & HPRT0_PWR)) {
1124 hprt0 |= HPRT0_PWR;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001125 dwc2_writel(hprt0, hsotg->regs + HPRT0);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001126 }
1127 }
1128
1129 dwc2_enable_host_interrupts(hsotg);
1130}
1131
1132static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
1133 struct dwc2_host_chan *chan)
1134{
1135 u32 hcintmsk = HCINTMSK_CHHLTD;
1136
1137 switch (chan->ep_type) {
1138 case USB_ENDPOINT_XFER_CONTROL:
1139 case USB_ENDPOINT_XFER_BULK:
1140 dev_vdbg(hsotg->dev, "control/bulk\n");
1141 hcintmsk |= HCINTMSK_XFERCOMPL;
1142 hcintmsk |= HCINTMSK_STALL;
1143 hcintmsk |= HCINTMSK_XACTERR;
1144 hcintmsk |= HCINTMSK_DATATGLERR;
1145 if (chan->ep_is_in) {
1146 hcintmsk |= HCINTMSK_BBLERR;
1147 } else {
1148 hcintmsk |= HCINTMSK_NAK;
1149 hcintmsk |= HCINTMSK_NYET;
1150 if (chan->do_ping)
1151 hcintmsk |= HCINTMSK_ACK;
1152 }
1153
1154 if (chan->do_split) {
1155 hcintmsk |= HCINTMSK_NAK;
1156 if (chan->complete_split)
1157 hcintmsk |= HCINTMSK_NYET;
1158 else
1159 hcintmsk |= HCINTMSK_ACK;
1160 }
1161
1162 if (chan->error_state)
1163 hcintmsk |= HCINTMSK_ACK;
1164 break;
1165
1166 case USB_ENDPOINT_XFER_INT:
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001167 if (dbg_perio())
1168 dev_vdbg(hsotg->dev, "intr\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001169 hcintmsk |= HCINTMSK_XFERCOMPL;
1170 hcintmsk |= HCINTMSK_NAK;
1171 hcintmsk |= HCINTMSK_STALL;
1172 hcintmsk |= HCINTMSK_XACTERR;
1173 hcintmsk |= HCINTMSK_DATATGLERR;
1174 hcintmsk |= HCINTMSK_FRMOVRUN;
1175
1176 if (chan->ep_is_in)
1177 hcintmsk |= HCINTMSK_BBLERR;
1178 if (chan->error_state)
1179 hcintmsk |= HCINTMSK_ACK;
1180 if (chan->do_split) {
1181 if (chan->complete_split)
1182 hcintmsk |= HCINTMSK_NYET;
1183 else
1184 hcintmsk |= HCINTMSK_ACK;
1185 }
1186 break;
1187
1188 case USB_ENDPOINT_XFER_ISOC:
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001189 if (dbg_perio())
1190 dev_vdbg(hsotg->dev, "isoc\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001191 hcintmsk |= HCINTMSK_XFERCOMPL;
1192 hcintmsk |= HCINTMSK_FRMOVRUN;
1193 hcintmsk |= HCINTMSK_ACK;
1194
1195 if (chan->ep_is_in) {
1196 hcintmsk |= HCINTMSK_XACTERR;
1197 hcintmsk |= HCINTMSK_BBLERR;
1198 }
1199 break;
1200 default:
1201 dev_err(hsotg->dev, "## Unknown EP type ##\n");
1202 break;
1203 }
1204
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001205 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001206 if (dbg_hc(chan))
1207 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001208}
1209
1210static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
1211 struct dwc2_host_chan *chan)
1212{
1213 u32 hcintmsk = HCINTMSK_CHHLTD;
1214
1215 /*
1216 * For Descriptor DMA mode core halts the channel on AHB error.
1217 * Interrupt is not required.
1218 */
1219 if (hsotg->core_params->dma_desc_enable <= 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001220 if (dbg_hc(chan))
1221 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001222 hcintmsk |= HCINTMSK_AHBERR;
1223 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001224 if (dbg_hc(chan))
1225 dev_vdbg(hsotg->dev, "desc DMA enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001226 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1227 hcintmsk |= HCINTMSK_XFERCOMPL;
1228 }
1229
1230 if (chan->error_state && !chan->do_split &&
1231 chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001232 if (dbg_hc(chan))
1233 dev_vdbg(hsotg->dev, "setting ACK\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001234 hcintmsk |= HCINTMSK_ACK;
1235 if (chan->ep_is_in) {
1236 hcintmsk |= HCINTMSK_DATATGLERR;
1237 if (chan->ep_type != USB_ENDPOINT_XFER_INT)
1238 hcintmsk |= HCINTMSK_NAK;
1239 }
1240 }
1241
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001242 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001243 if (dbg_hc(chan))
1244 dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001245}
1246
1247static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
1248 struct dwc2_host_chan *chan)
1249{
1250 u32 intmsk;
1251
1252 if (hsotg->core_params->dma_enable > 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001253 if (dbg_hc(chan))
1254 dev_vdbg(hsotg->dev, "DMA enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001255 dwc2_hc_enable_dma_ints(hsotg, chan);
1256 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001257 if (dbg_hc(chan))
1258 dev_vdbg(hsotg->dev, "DMA disabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001259 dwc2_hc_enable_slave_ints(hsotg, chan);
1260 }
1261
1262 /* Enable the top level host channel interrupt */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001263 intmsk = dwc2_readl(hsotg->regs + HAINTMSK);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001264 intmsk |= 1 << chan->hc_num;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001265 dwc2_writel(intmsk, hsotg->regs + HAINTMSK);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001266 if (dbg_hc(chan))
1267 dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001268
1269 /* Make sure host channel interrupts are enabled */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001270 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001271 intmsk |= GINTSTS_HCHINT;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001272 dwc2_writel(intmsk, hsotg->regs + GINTMSK);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001273 if (dbg_hc(chan))
1274 dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001275}
1276
1277/**
1278 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
1279 * a specific endpoint
1280 *
1281 * @hsotg: Programming view of DWC_otg controller
1282 * @chan: Information needed to initialize the host channel
1283 *
1284 * The HCCHARn register is set up with the characteristics specified in chan.
1285 * Host channel interrupts that may need to be serviced while this transfer is
1286 * in progress are enabled.
1287 */
1288void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1289{
1290 u8 hc_num = chan->hc_num;
1291 u32 hcintmsk;
1292 u32 hcchar;
1293 u32 hcsplt = 0;
1294
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001295 if (dbg_hc(chan))
1296 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001297
1298 /* Clear old interrupt conditions for this host channel */
1299 hcintmsk = 0xffffffff;
1300 hcintmsk &= ~HCINTMSK_RESERVED14_31;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001301 dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001302
1303 /* Enable channel interrupts required for this transfer */
1304 dwc2_hc_enable_ints(hsotg, chan);
1305
1306 /*
1307 * Program the HCCHARn register with the endpoint characteristics for
1308 * the current transfer
1309 */
1310 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
1311 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
1312 if (chan->ep_is_in)
1313 hcchar |= HCCHAR_EPDIR;
1314 if (chan->speed == USB_SPEED_LOW)
1315 hcchar |= HCCHAR_LSPDDEV;
1316 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
1317 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001318 dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001319 if (dbg_hc(chan)) {
1320 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
1321 hc_num, hcchar);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001322
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001323 dev_vdbg(hsotg->dev, "%s: Channel %d\n",
1324 __func__, hc_num);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001325 dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001326 chan->dev_addr);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001327 dev_vdbg(hsotg->dev, " Ep Num: %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001328 chan->ep_num);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001329 dev_vdbg(hsotg->dev, " Is In: %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001330 chan->ep_is_in);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001331 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001332 chan->speed == USB_SPEED_LOW);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001333 dev_vdbg(hsotg->dev, " Ep Type: %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001334 chan->ep_type);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001335 dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001336 chan->max_packet);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001337 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001338
1339 /* Program the HCSPLT register for SPLITs */
1340 if (chan->do_split) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001341 if (dbg_hc(chan))
1342 dev_vdbg(hsotg->dev,
1343 "Programming HC %d with split --> %s\n",
1344 hc_num,
1345 chan->complete_split ? "CSPLIT" : "SSPLIT");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001346 if (chan->complete_split)
1347 hcsplt |= HCSPLT_COMPSPLT;
1348 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
1349 HCSPLT_XACTPOS_MASK;
1350 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
1351 HCSPLT_HUBADDR_MASK;
1352 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
1353 HCSPLT_PRTADDR_MASK;
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001354 if (dbg_hc(chan)) {
1355 dev_vdbg(hsotg->dev, " comp split %d\n",
1356 chan->complete_split);
1357 dev_vdbg(hsotg->dev, " xact pos %d\n",
1358 chan->xact_pos);
1359 dev_vdbg(hsotg->dev, " hub addr %d\n",
1360 chan->hub_addr);
1361 dev_vdbg(hsotg->dev, " hub port %d\n",
1362 chan->hub_port);
1363 dev_vdbg(hsotg->dev, " is_in %d\n",
1364 chan->ep_is_in);
1365 dev_vdbg(hsotg->dev, " Max Pkt %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001366 chan->max_packet);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001367 dev_vdbg(hsotg->dev, " xferlen %d\n",
1368 chan->xfer_len);
1369 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001370 }
1371
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001372 dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001373}
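/*
 * Illustrative sketch, not part of the driver: the endpoint-characteristic
 * fields that dwc2_hc_init() programs into HCCHARn (and HCSPLTn for splits).
 * The helper name and the literal values are hypothetical; a real caller
 * must also set up xfer_len, the transfer buffer or DMA address and the
 * starting PID before dwc2_hc_start_transfer() is called. Compiled out so
 * it serves as documentation only.
 */
#if 0
static void dwc2_example_init_bulk_in(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan)
{
	chan->dev_addr = 1;			/* USB device address */
	chan->ep_num = 2;			/* endpoint number */
	chan->ep_is_in = 1;			/* IN direction */
	chan->speed = USB_SPEED_HIGH;
	chan->ep_type = USB_ENDPOINT_XFER_BULK;
	chan->max_packet = 512;
	chan->do_split = 0;			/* no split transactions */

	dwc2_hc_init(hsotg, chan);	/* programs HCCHAR/HCSPLT, enables ints */
}
#endif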
1374
1375/**
1376 * dwc2_hc_halt() - Attempts to halt a host channel
1377 *
1378 * @hsotg: Controller register interface
1379 * @chan: Host channel to halt
1380 * @halt_status: Reason for halting the channel
1381 *
1382 * This function should only be called in Slave mode or to abort a transfer in
1383 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
1384 * controller halts the channel when the transfer is complete or a condition
1385 * occurs that requires application intervention.
1386 *
 1387 * In Slave mode, checks for a free request queue entry, then sets the Channel
 1388 * Enable and Channel Disable bits of the Host Channel Characteristics
 1389 * register of the specified channel to initiate the halt. If there is no free
1390 * request queue entry, sets only the Channel Disable bit of the HCCHARn
1391 * register to flush requests for this channel. In the latter case, sets a
1392 * flag to indicate that the host channel needs to be halted when a request
1393 * queue slot is open.
1394 *
1395 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1396 * HCCHARn register. The controller ensures there is space in the request
1397 * queue before submitting the halt request.
1398 *
1399 * Some time may elapse before the core flushes any posted requests for this
1400 * host channel and halts. The Channel Halted interrupt handler completes the
1401 * deactivation of the host channel.
1402 */
1403void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
1404 enum dwc2_halt_status halt_status)
1405{
1406 u32 nptxsts, hptxsts, hcchar;
1407
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001408 if (dbg_hc(chan))
1409 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001410 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
1411 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
1412
1413 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1414 halt_status == DWC2_HC_XFER_AHB_ERR) {
1415 /*
1416 * Disable all channel interrupts except Ch Halted. The QTD
1417 * and QH state associated with this transfer has been cleared
1418 * (in the case of URB_DEQUEUE), so the channel needs to be
1419 * shut down carefully to prevent crashes.
1420 */
1421 u32 hcintmsk = HCINTMSK_CHHLTD;
1422
1423 dev_vdbg(hsotg->dev, "dequeue/error\n");
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001424 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001425
1426 /*
1427 * Make sure no other interrupts besides halt are currently
1428 * pending. Handling another interrupt could cause a crash due
1429 * to the QTD and QH state.
1430 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001431 dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001432
1433 /*
1434 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1435 * even if the channel was already halted for some other
1436 * reason
1437 */
1438 chan->halt_status = halt_status;
1439
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001440 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001441 if (!(hcchar & HCCHAR_CHENA)) {
1442 /*
1443 * The channel is either already halted or it hasn't
1444 * started yet. In DMA mode, the transfer may halt if
1445 * it finishes normally or a condition occurs that
1446 * requires driver intervention. Don't want to halt
1447 * the channel again. In either Slave or DMA mode,
1448 * it's possible that the transfer has been assigned
1449 * to a channel, but not started yet when an URB is
1450 * dequeued. Don't want to halt a channel that hasn't
1451 * started yet.
1452 */
1453 return;
1454 }
1455 }
1456 if (chan->halt_pending) {
1457 /*
1458 * A halt has already been issued for this channel. This might
1459 * happen when a transfer is aborted by a higher level in
1460 * the stack.
1461 */
1462 dev_vdbg(hsotg->dev,
1463 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1464 __func__, chan->hc_num);
1465 return;
1466 }
1467
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001468 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001469
 1470 /* No need to set the CHENA bit in DDMA mode to disable the channel */
1471 /* TODO check it everywhere channel is disabled */
1472 if (hsotg->core_params->dma_desc_enable <= 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001473 if (dbg_hc(chan))
1474 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001475 hcchar |= HCCHAR_CHENA;
1476 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001477 if (dbg_hc(chan))
1478 dev_dbg(hsotg->dev, "desc DMA enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001479 }
1480 hcchar |= HCCHAR_CHDIS;
1481
1482 if (hsotg->core_params->dma_enable <= 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001483 if (dbg_hc(chan))
1484 dev_vdbg(hsotg->dev, "DMA not enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001485 hcchar |= HCCHAR_CHENA;
1486
1487 /* Check for space in the request queue to issue the halt */
1488 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1489 chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1490 dev_vdbg(hsotg->dev, "control/bulk\n");
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001491 nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001492 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1493 dev_vdbg(hsotg->dev, "Disabling channel\n");
1494 hcchar &= ~HCCHAR_CHENA;
1495 }
1496 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001497 if (dbg_perio())
1498 dev_vdbg(hsotg->dev, "isoc/intr\n");
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001499 hptxsts = dwc2_readl(hsotg->regs + HPTXSTS);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001500 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1501 hsotg->queuing_high_bandwidth) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001502 if (dbg_perio())
1503 dev_vdbg(hsotg->dev, "Disabling channel\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001504 hcchar &= ~HCCHAR_CHENA;
1505 }
1506 }
1507 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001508 if (dbg_hc(chan))
1509 dev_vdbg(hsotg->dev, "DMA enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001510 }
1511
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001512 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001513 chan->halt_status = halt_status;
1514
1515 if (hcchar & HCCHAR_CHENA) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001516 if (dbg_hc(chan))
1517 dev_vdbg(hsotg->dev, "Channel enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001518 chan->halt_pending = 1;
1519 chan->halt_on_queue = 0;
1520 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001521 if (dbg_hc(chan))
1522 dev_vdbg(hsotg->dev, "Channel disabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001523 chan->halt_on_queue = 1;
1524 }
1525
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001526 if (dbg_hc(chan)) {
1527 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1528 chan->hc_num);
1529 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
1530 hcchar);
1531 dev_vdbg(hsotg->dev, " halt_pending: %d\n",
1532 chan->halt_pending);
1533 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
1534 chan->halt_on_queue);
1535 dev_vdbg(hsotg->dev, " halt_status: %d\n",
1536 chan->halt_status);
1537 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001538}
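/*
 * Illustrative sketch, not part of the driver: aborting an active transfer
 * from a hypothetical dequeue path. dwc2_hc_halt() only requests the halt;
 * the channel remains owned by the core until its Channel Halted (CHHLTD)
 * interrupt has been serviced, so the channel must not be reused or cleaned
 * up here.
 */
#if 0
static void dwc2_example_abort(struct dwc2_hsotg *hsotg,
			       struct dwc2_host_chan *chan)
{
	/* Masks everything but CHHLTD and sets CHDIS (and CHENA as needed) */
	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_URB_DEQUEUE);

	/* Deactivation finishes later, in the Channel Halted interrupt handler */
}
#endif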
1539
1540/**
1541 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1542 *
1543 * @hsotg: Programming view of DWC_otg controller
1544 * @chan: Identifies the host channel to clean up
1545 *
1546 * This function is normally called after a transfer is done and the host
1547 * channel is being released
1548 */
1549void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1550{
1551 u32 hcintmsk;
1552
1553 chan->xfer_started = 0;
1554
1555 /*
1556 * Clear channel interrupt enables and any unhandled channel interrupt
1557 * conditions
1558 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001559 dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001560 hcintmsk = 0xffffffff;
1561 hcintmsk &= ~HCINTMSK_RESERVED14_31;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001562 dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001563}
1564
1565/**
1566 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1567 * which frame a periodic transfer should occur
1568 *
1569 * @hsotg: Programming view of DWC_otg controller
1570 * @chan: Identifies the host channel to set up and its properties
1571 * @hcchar: Current value of the HCCHAR register for the specified host channel
1572 *
1573 * This function has no effect on non-periodic transfers
1574 */
1575static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1576 struct dwc2_host_chan *chan, u32 *hcchar)
1577{
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001578 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1579 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001580 /* 1 if _next_ frame is odd, 0 if it's even */
Paul Zimmerman81a58952013-06-24 11:34:23 -07001581 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001582 *hcchar |= HCCHAR_ODDFRM;
1583 }
1584}
1585
1586static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1587{
1588 /* Set up the initial PID for the transfer */
1589 if (chan->speed == USB_SPEED_HIGH) {
1590 if (chan->ep_is_in) {
1591 if (chan->multi_count == 1)
1592 chan->data_pid_start = DWC2_HC_PID_DATA0;
1593 else if (chan->multi_count == 2)
1594 chan->data_pid_start = DWC2_HC_PID_DATA1;
1595 else
1596 chan->data_pid_start = DWC2_HC_PID_DATA2;
1597 } else {
1598 if (chan->multi_count == 1)
1599 chan->data_pid_start = DWC2_HC_PID_DATA0;
1600 else
1601 chan->data_pid_start = DWC2_HC_PID_MDATA;
1602 }
1603 } else {
1604 chan->data_pid_start = DWC2_HC_PID_DATA0;
1605 }
1606}
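/*
 * Worked example for the PID selection above: a high-speed isochronous IN
 * endpoint with multi_count == 3 (three transactions per microframe) starts
 * the transfer with DATA2; with multi_count == 2 it starts with DATA1, and a
 * single transaction starts with DATA0. A high-speed isochronous OUT with
 * multi_count > 1 starts with MDATA. Full- and low-speed isochronous
 * transfers always start with DATA0.
 */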
1607
1608/**
1609 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1610 * the Host Channel
1611 *
1612 * @hsotg: Programming view of DWC_otg controller
1613 * @chan: Information needed to initialize the host channel
1614 *
1615 * This function should only be called in Slave mode. For a channel associated
1616 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1617 * associated with a periodic EP, the periodic Tx FIFO is written.
1618 *
1619 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1620 * the number of bytes written to the Tx FIFO.
1621 */
1622static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1623 struct dwc2_host_chan *chan)
1624{
1625 u32 i;
1626 u32 remaining_count;
1627 u32 byte_count;
1628 u32 dword_count;
1629 u32 __iomem *data_fifo;
1630 u32 *data_buf = (u32 *)chan->xfer_buf;
1631
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001632 if (dbg_hc(chan))
1633 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001634
1635 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1636
1637 remaining_count = chan->xfer_len - chan->xfer_count;
1638 if (remaining_count > chan->max_packet)
1639 byte_count = chan->max_packet;
1640 else
1641 byte_count = remaining_count;
1642
1643 dword_count = (byte_count + 3) / 4;
1644
1645 if (((unsigned long)data_buf & 0x3) == 0) {
1646 /* xfer_buf is DWORD aligned */
1647 for (i = 0; i < dword_count; i++, data_buf++)
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001648 dwc2_writel(*data_buf, data_fifo);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001649 } else {
1650 /* xfer_buf is not DWORD aligned */
1651 for (i = 0; i < dword_count; i++, data_buf++) {
1652 u32 data = data_buf[0] | data_buf[1] << 8 |
1653 data_buf[2] << 16 | data_buf[3] << 24;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001654 dwc2_writel(data, data_fifo);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001655 }
1656 }
1657
1658 chan->xfer_count += byte_count;
1659 chan->xfer_buf += byte_count;
1660}
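/*
 * Worked example for the FIFO write above: with xfer_len == 512,
 * xfer_count == 451 and max_packet == 64, remaining_count is 61, so
 * byte_count is 61 and dword_count is (61 + 3) / 4 == 16. Sixteen 32-bit
 * words are pushed into HCFIFO(n), and xfer_count and xfer_buf both advance
 * by 61 (the byte count, not the padded word count).
 */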
1661
1662/**
1663 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1664 * channel and starts the transfer
1665 *
1666 * @hsotg: Programming view of DWC_otg controller
1667 * @chan: Information needed to initialize the host channel. The xfer_len value
1668 * may be reduced to accommodate the max widths of the XferSize and
1669 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1670 * changed to reflect the final xfer_len value.
1671 *
1672 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1673 * the caller must ensure that there is sufficient space in the request queue
1674 * and Tx Data FIFO.
1675 *
1676 * For an OUT transfer in Slave mode, it loads a data packet into the
1677 * appropriate FIFO. If necessary, additional data packets are loaded in the
1678 * Host ISR.
1679 *
1680 * For an IN transfer in Slave mode, a data packet is requested. The data
1681 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1682 * additional data packets are requested in the Host ISR.
1683 *
1684 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1685 * register along with a packet count of 1 and the channel is enabled. This
1686 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1687 * simply set to 0 since no data transfer occurs in this case.
1688 *
1689 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1690 * all the information required to perform the subsequent data transfer. In
1691 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1692 * controller performs the entire PING protocol, then starts the data
1693 * transfer.
1694 */
1695void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1696 struct dwc2_host_chan *chan)
1697{
1698 u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1699 u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1700 u32 hcchar;
1701 u32 hctsiz = 0;
1702 u16 num_packets;
1703
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001704 if (dbg_hc(chan))
1705 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001706
1707 if (chan->do_ping) {
1708 if (hsotg->core_params->dma_enable <= 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001709 if (dbg_hc(chan))
1710 dev_vdbg(hsotg->dev, "ping, no DMA\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001711 dwc2_hc_do_ping(hsotg, chan);
1712 chan->xfer_started = 1;
1713 return;
1714 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001715 if (dbg_hc(chan))
1716 dev_vdbg(hsotg->dev, "ping, DMA\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001717 hctsiz |= TSIZ_DOPNG;
1718 }
1719 }
1720
1721 if (chan->do_split) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001722 if (dbg_hc(chan))
1723 dev_vdbg(hsotg->dev, "split\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001724 num_packets = 1;
1725
1726 if (chan->complete_split && !chan->ep_is_in)
1727 /*
1728 * For CSPLIT OUT Transfer, set the size to 0 so the
1729 * core doesn't expect any data written to the FIFO
1730 */
1731 chan->xfer_len = 0;
1732 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1733 chan->xfer_len = chan->max_packet;
1734 else if (!chan->ep_is_in && chan->xfer_len > 188)
1735 chan->xfer_len = 188;
1736
1737 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1738 TSIZ_XFERSIZE_MASK;
1739 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001740 if (dbg_hc(chan))
1741 dev_vdbg(hsotg->dev, "no split\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001742 /*
1743 * Ensure that the transfer length and packet count will fit
1744 * in the widths allocated for them in the HCTSIZn register
1745 */
1746 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1747 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1748 /*
1749 * Make sure the transfer size is no larger than one
1750 * (micro)frame's worth of data. (A check was done
1751 * when the periodic transfer was accepted to ensure
1752 * that a (micro)frame's worth of data can be
1753 * programmed into a channel.)
1754 */
1755 u32 max_periodic_len =
1756 chan->multi_count * chan->max_packet;
1757
1758 if (chan->xfer_len > max_periodic_len)
1759 chan->xfer_len = max_periodic_len;
1760 } else if (chan->xfer_len > max_hc_xfer_size) {
1761 /*
1762 * Make sure that xfer_len is a multiple of max packet
1763 * size
1764 */
1765 chan->xfer_len =
1766 max_hc_xfer_size - chan->max_packet + 1;
1767 }
1768
1769 if (chan->xfer_len > 0) {
1770 num_packets = (chan->xfer_len + chan->max_packet - 1) /
1771 chan->max_packet;
1772 if (num_packets > max_hc_pkt_count) {
1773 num_packets = max_hc_pkt_count;
1774 chan->xfer_len = num_packets * chan->max_packet;
1775 }
1776 } else {
1777 /* Need 1 packet for transfer length of 0 */
1778 num_packets = 1;
1779 }
1780
1781 if (chan->ep_is_in)
1782 /*
1783 * Always program an integral # of max packets for IN
1784 * transfers
1785 */
1786 chan->xfer_len = num_packets * chan->max_packet;
1787
1788 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1789 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1790 /*
1791 * Make sure that the multi_count field matches the
1792 * actual transfer length
1793 */
1794 chan->multi_count = num_packets;
1795
1796 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1797 dwc2_set_pid_isoc(chan);
1798
1799 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1800 TSIZ_XFERSIZE_MASK;
1801 }
1802
1803 chan->start_pkt_count = num_packets;
1804 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1805 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1806 TSIZ_SC_MC_PID_MASK;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001807 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001808 if (dbg_hc(chan)) {
1809 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1810 hctsiz, chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001811
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001812 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1813 chan->hc_num);
1814 dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001815 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1816 TSIZ_XFERSIZE_SHIFT);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001817 dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001818 (hctsiz & TSIZ_PKTCNT_MASK) >>
1819 TSIZ_PKTCNT_SHIFT);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001820 dev_vdbg(hsotg->dev, " Start PID: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001821 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1822 TSIZ_SC_MC_PID_SHIFT);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001823 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001824
1825 if (hsotg->core_params->dma_enable > 0) {
1826 dma_addr_t dma_addr;
1827
1828 if (chan->align_buf) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001829 if (dbg_hc(chan))
1830 dev_vdbg(hsotg->dev, "align_buf\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001831 dma_addr = chan->align_buf;
1832 } else {
1833 dma_addr = chan->xfer_dma;
1834 }
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001835 dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001836 if (dbg_hc(chan))
1837 dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1838 (unsigned long)dma_addr, chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001839 }
1840
1841 /* Start the split */
1842 if (chan->do_split) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001843 u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001844
1845 hcsplt |= HCSPLT_SPLTENA;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001846 dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001847 }
1848
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001849 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001850 hcchar &= ~HCCHAR_MULTICNT_MASK;
1851 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1852 HCCHAR_MULTICNT_MASK;
1853 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1854
1855 if (hcchar & HCCHAR_CHDIS)
1856 dev_warn(hsotg->dev,
1857 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1858 __func__, chan->hc_num, hcchar);
1859
1860 /* Set host channel enable after all other setup is complete */
1861 hcchar |= HCCHAR_CHENA;
1862 hcchar &= ~HCCHAR_CHDIS;
1863
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001864 if (dbg_hc(chan))
1865 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001866 (hcchar & HCCHAR_MULTICNT_MASK) >>
1867 HCCHAR_MULTICNT_SHIFT);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001868
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001869 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001870 if (dbg_hc(chan))
1871 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1872 chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001873
1874 chan->xfer_started = 1;
1875 chan->requests++;
1876
1877 if (hsotg->core_params->dma_enable <= 0 &&
1878 !chan->ep_is_in && chan->xfer_len > 0)
1879 /* Load OUT packet into the appropriate Tx FIFO */
1880 dwc2_hc_write_packet(hsotg, chan);
1881}
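/*
 * Worked example for the non-split sizing above: a Slave or Buffer DMA bulk
 * IN with xfer_len == 1000 and max_packet == 64 yields
 * num_packets == (1000 + 63) / 64 == 16, and because the endpoint is IN the
 * transfer length is rounded up to an integral number of packets,
 * 16 * 64 == 1024 bytes, before HCTSIZ is written. For a start split the
 * same request would instead be clamped to a single packet of at most
 * max_packet (IN) or 188 bytes (OUT).
 */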
1882
1883/**
1884 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1885 * host channel and starts the transfer in Descriptor DMA mode
1886 *
1887 * @hsotg: Programming view of DWC_otg controller
1888 * @chan: Information needed to initialize the host channel
1889 *
1890 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1891 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1892 * with micro-frame bitmap.
1893 *
1894 * Initializes HCDMA register with descriptor list address and CTD value then
1895 * starts the transfer via enabling the channel.
1896 */
1897void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1898 struct dwc2_host_chan *chan)
1899{
1900 u32 hcchar;
1901 u32 hc_dma;
1902 u32 hctsiz = 0;
1903
1904 if (chan->do_ping)
1905 hctsiz |= TSIZ_DOPNG;
1906
1907 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1908 dwc2_set_pid_isoc(chan);
1909
1910 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1911 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1912 TSIZ_SC_MC_PID_MASK;
1913
1914 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1915 hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1916
1917 /* Non-zero only for high-speed interrupt endpoints */
1918 hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1919
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001920 if (dbg_hc(chan)) {
1921 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1922 chan->hc_num);
1923 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1924 chan->data_pid_start);
1925 dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
1926 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001927
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001928 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001929
1930 hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
1931
1932 /* Always start from first descriptor */
1933 hc_dma &= ~HCDMA_CTD_MASK;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001934 dwc2_writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001935 if (dbg_hc(chan))
1936 dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
1937 hc_dma, chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001938
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001939 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001940 hcchar &= ~HCCHAR_MULTICNT_MASK;
1941 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1942 HCCHAR_MULTICNT_MASK;
1943
1944 if (hcchar & HCCHAR_CHDIS)
1945 dev_warn(hsotg->dev,
1946 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1947 __func__, chan->hc_num, hcchar);
1948
1949 /* Set host channel enable after all other setup is complete */
1950 hcchar |= HCCHAR_CHENA;
1951 hcchar &= ~HCCHAR_CHDIS;
1952
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001953 if (dbg_hc(chan))
1954 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001955 (hcchar & HCCHAR_MULTICNT_MASK) >>
1956 HCCHAR_MULTICNT_SHIFT);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001957
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001958 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001959 if (dbg_hc(chan))
1960 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1961 chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001962
1963 chan->xfer_started = 1;
1964 chan->requests++;
1965}
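/*
 * Worked example for the NTD encoding above: a descriptor list with
 * chan->ntd == 8 descriptors is programmed as (8 - 1) == 7 in the TSIZ_NTD
 * field, and HCDMA is loaded with the list base address with the CTD bits
 * cleared so processing always begins at descriptor 0.
 */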
1966
1967/**
1968 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1969 * a previous call to dwc2_hc_start_transfer()
1970 *
1971 * @hsotg: Programming view of DWC_otg controller
1972 * @chan: Information needed to initialize the host channel
1973 *
1974 * The caller must ensure there is sufficient space in the request queue and Tx
1975 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1976 * the controller acts autonomously to complete transfers programmed to a host
1977 * channel.
1978 *
1979 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1980 * if there is any data remaining to be queued. For an IN transfer, another
1981 * data packet is always requested. For the SETUP phase of a control transfer,
1982 * this function does nothing.
1983 *
1984 * Return: 1 if a new request is queued, 0 if no more requests are required
1985 * for this transfer
1986 */
1987int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1988 struct dwc2_host_chan *chan)
1989{
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001990 if (dbg_hc(chan))
1991 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1992 chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001993
1994 if (chan->do_split)
1995 /* SPLITs always queue just once per channel */
1996 return 0;
1997
1998 if (chan->data_pid_start == DWC2_HC_PID_SETUP)
1999 /* SETUPs are queued only once since they can't be NAK'd */
2000 return 0;
2001
2002 if (chan->ep_is_in) {
2003 /*
2004 * Always queue another request for other IN transfers. If
2005 * back-to-back INs are issued and NAKs are received for both,
2006 * the driver may still be processing the first NAK when the
2007 * second NAK is received. When the interrupt handler clears
2008 * the NAK interrupt for the first NAK, the second NAK will
2009 * not be seen. So we can't depend on the NAK interrupt
2010 * handler to requeue a NAK'd request. Instead, IN requests
2011 * are issued each time this function is called. When the
2012 * transfer completes, the extra requests for the channel will
2013 * be flushed.
2014 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002015 u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002016
2017 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
2018 hcchar |= HCCHAR_CHENA;
2019 hcchar &= ~HCCHAR_CHDIS;
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02002020 if (dbg_hc(chan))
2021 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
2022 hcchar);
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002023 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002024 chan->requests++;
2025 return 1;
2026 }
2027
2028 /* OUT transfers */
2029
2030 if (chan->xfer_count < chan->xfer_len) {
2031 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
2032 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002033 u32 hcchar = dwc2_readl(hsotg->regs +
2034 HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002035
2036 dwc2_hc_set_even_odd_frame(hsotg, chan,
2037 &hcchar);
2038 }
2039
2040 /* Load OUT packet into the appropriate Tx FIFO */
2041 dwc2_hc_write_packet(hsotg, chan);
2042 chan->requests++;
2043 return 1;
2044 }
2045
2046 return 0;
2047}
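/*
 * Worked example for the Slave mode OUT path above: a 1024-byte OUT with
 * max_packet == 512 has its first packet loaded by dwc2_hc_start_transfer(),
 * leaving xfer_count == 512. The first call here sees 512 < 1024, loads the
 * second packet and returns 1; the next call sees no remaining data and
 * returns 0, meaning software need not queue further requests for this
 * channel.
 */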
2048
2049/**
2050 * dwc2_hc_do_ping() - Starts a PING transfer
2051 *
2052 * @hsotg: Programming view of DWC_otg controller
2053 * @chan: Information needed to initialize the host channel
2054 *
2055 * This function should only be called in Slave mode. The Do Ping bit is set in
2056 * the HCTSIZ register, then the channel is enabled.
2057 */
2058void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
2059{
2060 u32 hcchar;
2061 u32 hctsiz;
2062
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02002063 if (dbg_hc(chan))
2064 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
2065 chan->hc_num);
2066
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002067
2068 hctsiz = TSIZ_DOPNG;
2069 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002070 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002071
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002072 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002073 hcchar |= HCCHAR_CHENA;
2074 hcchar &= ~HCCHAR_CHDIS;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002075 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002076}
2077
2078/**
 2079 * dwc2_calc_frame_interval() - Calculates the correct frame interval value for
2080 * the HFIR register according to PHY type and speed
2081 *
2082 * @hsotg: Programming view of DWC_otg controller
2083 *
2084 * NOTE: The caller can modify the value of the HFIR register only after the
2085 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
2086 * has been set
2087 */
2088u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
2089{
2090 u32 usbcfg;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002091 u32 hprt0;
2092 int clock = 60; /* default value */
2093
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002094 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
2095 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002096
2097 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
2098 !(usbcfg & GUSBCFG_PHYIF16))
2099 clock = 60;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002100 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002101 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
2102 clock = 48;
2103 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2104 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2105 clock = 30;
2106 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2107 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
2108 clock = 60;
2109 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2110 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2111 clock = 48;
2112 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002113 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002114 clock = 48;
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02002115 if ((usbcfg & GUSBCFG_PHYSEL) &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002116 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002117 clock = 48;
2118
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02002119 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002120 /* High speed case */
2121 return 125 * clock;
2122 else
2123 /* FS/LS case */
2124 return 1000 * clock;
2125}
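/*
 * Worked example for the interval calculation above: a 60 MHz UTMI+ PHY on a
 * high-speed port yields 125 * 60 == 7500 PHY clocks per 125 us microframe,
 * while a dedicated 48 MHz full-speed PHY yields 1000 * 48 == 48000 clocks
 * per 1 ms frame. A hypothetical caller would reprogram HFIR with the result
 * once the port is enabled:
 */
#if 0
	dwc2_writel(dwc2_calc_frame_interval(hsotg), hsotg->regs + HFIR);
#endif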
2126
2127/**
2128 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
2129 * buffer
2130 *
 2131 * @hsotg: Programming view of DWC_otg controller
2132 * @dest: Destination buffer for the packet
2133 * @bytes: Number of bytes to copy to the destination
2134 */
2135void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
2136{
2137 u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
2138 u32 *data_buf = (u32 *)dest;
2139 int word_count = (bytes + 3) / 4;
2140 int i;
2141
2142 /*
2143 * Todo: Account for the case where dest is not dword aligned. This
2144 * requires reading data from the FIFO into a u32 temp buffer, then
2145 * moving it into the data buffer.
2146 */
2147
2148 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
2149
2150 for (i = 0; i < word_count; i++, data_buf++)
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002151 *data_buf = dwc2_readl(fifo);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002152}
2153
2154/**
2155 * dwc2_dump_host_registers() - Prints the host registers
2156 *
2157 * @hsotg: Programming view of DWC_otg controller
2158 *
2159 * NOTE: This function will be removed once the peripheral controller code
2160 * is integrated and the driver is stable
2161 */
2162void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
2163{
2164#ifdef DEBUG
2165 u32 __iomem *addr;
2166 int i;
2167
2168 dev_dbg(hsotg->dev, "Host Global Registers\n");
2169 addr = hsotg->regs + HCFG;
2170 dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002171 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002172 addr = hsotg->regs + HFIR;
2173 dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002174 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002175 addr = hsotg->regs + HFNUM;
2176 dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002177 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002178 addr = hsotg->regs + HPTXSTS;
2179 dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002180 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002181 addr = hsotg->regs + HAINT;
2182 dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002183 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002184 addr = hsotg->regs + HAINTMSK;
2185 dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002186 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002187 if (hsotg->core_params->dma_desc_enable > 0) {
2188 addr = hsotg->regs + HFLBADDR;
2189 dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002190 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002191 }
2192
2193 addr = hsotg->regs + HPRT0;
2194 dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002195 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002196
2197 for (i = 0; i < hsotg->core_params->host_channels; i++) {
2198 dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
2199 addr = hsotg->regs + HCCHAR(i);
2200 dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002201 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002202 addr = hsotg->regs + HCSPLT(i);
2203 dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002204 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002205 addr = hsotg->regs + HCINT(i);
2206 dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002207 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002208 addr = hsotg->regs + HCINTMSK(i);
2209 dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002210 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002211 addr = hsotg->regs + HCTSIZ(i);
2212 dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002213 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002214 addr = hsotg->regs + HCDMA(i);
2215 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002216 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002217 if (hsotg->core_params->dma_desc_enable > 0) {
2218 addr = hsotg->regs + HCDMAB(i);
2219 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002220 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002221 }
2222 }
2223#endif
2224}
2225
2226/**
2227 * dwc2_dump_global_registers() - Prints the core global registers
2228 *
2229 * @hsotg: Programming view of DWC_otg controller
2230 *
2231 * NOTE: This function will be removed once the peripheral controller code
2232 * is integrated and the driver is stable
2233 */
2234void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
2235{
2236#ifdef DEBUG
2237 u32 __iomem *addr;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002238
2239 dev_dbg(hsotg->dev, "Core Global Registers\n");
2240 addr = hsotg->regs + GOTGCTL;
2241 dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002242 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002243 addr = hsotg->regs + GOTGINT;
2244 dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002245 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002246 addr = hsotg->regs + GAHBCFG;
2247 dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002248 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002249 addr = hsotg->regs + GUSBCFG;
2250 dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002251 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002252 addr = hsotg->regs + GRSTCTL;
2253 dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002254 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002255 addr = hsotg->regs + GINTSTS;
2256 dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002257 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002258 addr = hsotg->regs + GINTMSK;
2259 dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002260 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002261 addr = hsotg->regs + GRXSTSR;
2262 dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002263 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002264 addr = hsotg->regs + GRXFSIZ;
2265 dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002266 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002267 addr = hsotg->regs + GNPTXFSIZ;
2268 dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002269 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002270 addr = hsotg->regs + GNPTXSTS;
2271 dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002272 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002273 addr = hsotg->regs + GI2CCTL;
2274 dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002275 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002276 addr = hsotg->regs + GPVNDCTL;
2277 dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002278 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002279 addr = hsotg->regs + GGPIO;
2280 dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002281 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002282 addr = hsotg->regs + GUID;
2283 dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002284 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002285 addr = hsotg->regs + GSNPSID;
2286 dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002287 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002288 addr = hsotg->regs + GHWCFG1;
2289 dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002290 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002291 addr = hsotg->regs + GHWCFG2;
2292 dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002293 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002294 addr = hsotg->regs + GHWCFG3;
2295 dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002296 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002297 addr = hsotg->regs + GHWCFG4;
2298 dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002299 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002300 addr = hsotg->regs + GLPMCFG;
2301 dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002302 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002303 addr = hsotg->regs + GPWRDN;
2304 dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002305 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002306 addr = hsotg->regs + GDFIFOCFG;
2307 dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002308 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002309 addr = hsotg->regs + HPTXFSIZ;
2310 dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002311 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002312
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002313 addr = hsotg->regs + PCGCTL;
2314 dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002315 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002316#endif
2317}
2318
2319/**
2320 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
2321 *
2322 * @hsotg: Programming view of DWC_otg controller
2323 * @num: Tx FIFO to flush
2324 */
2325void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
2326{
2327 u32 greset;
2328 int count = 0;
2329
2330 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
2331
2332 greset = GRSTCTL_TXFFLSH;
2333 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002334 dwc2_writel(greset, hsotg->regs + GRSTCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002335
2336 do {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002337 greset = dwc2_readl(hsotg->regs + GRSTCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002338 if (++count > 10000) {
2339 dev_warn(hsotg->dev,
2340 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
2341 __func__, greset,
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002342 dwc2_readl(hsotg->regs + GNPTXSTS));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002343 break;
2344 }
2345 udelay(1);
2346 } while (greset & GRSTCTL_TXFFLSH);
2347
2348 /* Wait for at least 3 PHY Clocks */
2349 udelay(1);
2350}
2351
2352/**
2353 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
2354 *
2355 * @hsotg: Programming view of DWC_otg controller
2356 */
2357void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
2358{
2359 u32 greset;
2360 int count = 0;
2361
2362 dev_vdbg(hsotg->dev, "%s()\n", __func__);
2363
2364 greset = GRSTCTL_RXFFLSH;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002365 dwc2_writel(greset, hsotg->regs + GRSTCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002366
2367 do {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002368 greset = dwc2_readl(hsotg->regs + GRSTCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002369 if (++count > 10000) {
2370 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
2371 __func__, greset);
2372 break;
2373 }
2374 udelay(1);
2375 } while (greset & GRSTCTL_RXFFLSH);
2376
2377 /* Wait for at least 3 PHY Clocks */
2378 udelay(1);
2379}
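/*
 * Illustrative usage, not a specific call site in this file: after the FIFO
 * sizes are (re)programmed, the driver flushes the Tx FIFOs and the Rx FIFO
 * so no stale data is left behind. FIFO number 0 is the non-periodic Tx FIFO
 * in host mode; the "flush all Tx FIFOs" encoding of TxFNum is
 * hardware-defined and not shown here.
 */
#if 0
	dwc2_flush_tx_fifo(hsotg, 0);	/* non-periodic Tx FIFO */
	dwc2_flush_rx_fifo(hsotg);
#endif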
2380
Paul Zimmerman498f0662013-11-22 16:43:47 -08002381#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002382
2383/* Parameter access functions */
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002384void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002385{
2386 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002387
2388 switch (val) {
2389 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002390 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002391 valid = 0;
2392 break;
2393 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002394 switch (hsotg->hw_params.op_mode) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002395 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2396 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2397 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2398 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2399 break;
2400 default:
2401 valid = 0;
2402 break;
2403 }
2404 break;
2405 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
2406 /* always valid */
2407 break;
2408 default:
2409 valid = 0;
2410 break;
2411 }
2412
2413 if (!valid) {
2414 if (val >= 0)
2415 dev_err(hsotg->dev,
2416 "%d invalid for otg_cap parameter. Check HW configuration.\n",
2417 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002418 switch (hsotg->hw_params.op_mode) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002419 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2420 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
2421 break;
2422 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2423 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2424 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2425 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2426 break;
2427 default:
2428 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2429 break;
2430 }
2431 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002432 }
2433
2434 hsotg->core_params->otg_cap = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002435}
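/*
 * Illustrative usage, not a real call site: platform glue typically passes
 * -1 to these setters to request the autodetected hardware default, or an
 * explicit value that is validated against the GHWCFG* capabilities and
 * replaced with the default (with an error message) when the hardware
 * cannot support it.
 */
#if 0
	dwc2_set_param_otg_cap(hsotg, -1);	/* take the hardware default */
	dwc2_set_param_dma_enable(hsotg, 1);	/* request DMA; reverts if slave-only */
#endif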
2436
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002437void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002438{
2439 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002440
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002441 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002442 valid = 0;
2443 if (val < 0)
2444 valid = 0;
2445
2446 if (!valid) {
2447 if (val >= 0)
2448 dev_err(hsotg->dev,
2449 "%d invalid for dma_enable parameter. Check HW configuration.\n",
2450 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002451 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002452 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002453 }
2454
2455 hsotg->core_params->dma_enable = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002456}
2457
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002458void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002459{
2460 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002461
2462 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002463 !hsotg->hw_params.dma_desc_enable))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002464 valid = 0;
2465 if (val < 0)
2466 valid = 0;
2467
2468 if (!valid) {
2469 if (val >= 0)
2470 dev_err(hsotg->dev,
2471 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2472 val);
2473 val = (hsotg->core_params->dma_enable > 0 &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002474 hsotg->hw_params.dma_desc_enable);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002475 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002476 }
2477
2478 hsotg->core_params->dma_desc_enable = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002479}
2480
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002481void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2482 int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002483{
Paul Zimmerman498f0662013-11-22 16:43:47 -08002484 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002485 if (val >= 0) {
2486 dev_err(hsotg->dev,
2487 "Wrong value for host_support_fs_low_power\n");
2488 dev_err(hsotg->dev,
2489 "host_support_fs_low_power must be 0 or 1\n");
2490 }
2491 val = 0;
2492 dev_dbg(hsotg->dev,
2493 "Setting host_support_fs_low_power to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002494 }
2495
2496 hsotg->core_params->host_support_fs_ls_low_power = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002497}
2498
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002499void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002500{
2501 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002502
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002503 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002504 valid = 0;
2505 if (val < 0)
2506 valid = 0;
2507
2508 if (!valid) {
2509 if (val >= 0)
2510 dev_err(hsotg->dev,
2511 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2512 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002513 val = hsotg->hw_params.enable_dynamic_fifo;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002514 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002515 }
2516
2517 hsotg->core_params->enable_dynamic_fifo = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002518}
2519
void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.host_rx_fifo_size;
		dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
	}

	hsotg->core_params->host_rx_fifo_size = val;
}

void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.host_nperio_tx_fifo_size;
		dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
			val);
	}

	hsotg->core_params->host_nperio_tx_fifo_size = val;
}

void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.host_perio_tx_fifo_size;
		dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
			val);
	}

	hsotg->core_params->host_perio_tx_fifo_size = val;
}

void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for max_transfer_size. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.max_transfer_size;
		dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
	}

	hsotg->core_params->max_transfer_size = val;
}

void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 15 || val > hsotg->hw_params.max_packet_count)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for max_packet_count. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.max_packet_count;
		dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
	}

	hsotg->core_params->max_packet_count = val;
}

void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (val < 1 || val > hsotg->hw_params.host_channels)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_channels. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.host_channels;
		dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
	}

	hsotg->core_params->host_channels = val;
}

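/**
 * dwc2_set_param_phy_type() - Set the PHY type parameter
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @val:   DWC2_PHY_TYPE_PARAM_FS, _UTMI or _ULPI, or a negative value to
 *         auto-detect
 *
 * The requested type is only accepted if the corresponding HS/FS PHY
 * interface was configured into the core (GHWCFG2); otherwise a supported
 * type is chosen, preferring UTMI+ over ULPI over a dedicated FS PHY.
 */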
void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 0;
	u32 hs_phy_type, fs_phy_type;

	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
			       DWC2_PHY_TYPE_PARAM_ULPI)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for phy_type\n");
			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
		}

		valid = 0;
	}

	hs_phy_type = hsotg->hw_params.hs_phy_type;
	fs_phy_type = hsotg->hw_params.fs_phy_type;
	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
		valid = 1;
	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
		valid = 1;
	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
		valid = 1;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for phy_type. Check HW configuration.\n",
				val);
		val = DWC2_PHY_TYPE_PARAM_FS;
		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
				val = DWC2_PHY_TYPE_PARAM_UTMI;
			else
				val = DWC2_PHY_TYPE_PARAM_ULPI;
		}
		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
	}

	hsotg->core_params->phy_type = val;
}

static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
{
	return hsotg->core_params->phy_type;
}

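/**
 * dwc2_set_param_speed() - Set the core speed parameter
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @val:   DWC2_SPEED_PARAM_HIGH or DWC2_SPEED_PARAM_FULL, or a negative
 *         value to auto-detect
 *
 * High speed is rejected when the core is configured with a dedicated
 * full-speed PHY, in which case the parameter falls back to full speed.
 */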
void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
			dev_err(hsotg->dev, "speed parameter must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == DWC2_SPEED_PARAM_HIGH &&
	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for speed parameter. Check HW configuration.\n",
				val);
		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
	}

	hsotg->core_params->speed = val;
}

void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for host_ls_low_power_phy_clk parameter\n");
			dev_err(hsotg->dev,
				"host_ls_low_power_phy_clk must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
				val);
		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
			? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
			: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
		dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
			val);
	}

	hsotg->core_params->host_ls_low_power_phy_clk = val;
}

void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
			dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
	}

	hsotg->core_params->phy_ulpi_ddr = val;
}

void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for phy_ulpi_ext_vbus\n");
			dev_err(hsotg->dev,
				"phy_ulpi_ext_vbus must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
	}

	hsotg->core_params->phy_ulpi_ext_vbus = val;
}

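/**
 * dwc2_set_param_phy_utmi_width() - Set the UTMI+ PHY data width parameter
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @val:   8 or 16 (bits), or a negative value to auto-detect
 *
 * Only widths that match the UTMI+ data width configured in GHWCFG4 are
 * accepted; anything else falls back to the width reported by the hardware.
 */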
void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 0;

	switch (hsotg->hw_params.utmi_phy_data_width) {
	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
		valid = (val == 8);
		break;
	case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
		valid = (val == 16);
		break;
	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
		valid = (val == 8 || val == 16);
		break;
	}

	if (!valid) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"%d invalid for phy_utmi_width. Check HW configuration.\n",
				val);
		}
		val = (hsotg->hw_params.utmi_phy_data_width ==
		       GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
		dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
	}

	hsotg->core_params->phy_utmi_width = val;
}

void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
			dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
	}

	hsotg->core_params->ulpi_fs_ls = val;
}

void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for ts_dline\n");
			dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
	}

	hsotg->core_params->ts_dline = val;
}

void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
		}

		valid = 0;
	}

	if (val == 1 && !(hsotg->hw_params.i2c_enable))
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for i2c_enable. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.i2c_enable;
		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
	}

	hsotg->core_params->i2c_enable = val;
}

void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"Wrong value for en_multiple_tx_fifo\n");
			dev_err(hsotg->dev,
				"en_multiple_tx_fifo must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.en_multiple_tx_fifo;
		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
	}

	hsotg->core_params->en_multiple_tx_fifo = val;
}

void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
{
	int valid = 1;

	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter reload_ctl\n", val);
			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
		}
		valid = 0;
	}

	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
		valid = 0;

	if (!valid) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
				val);
		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
	}

	hsotg->core_params->reload_ctl = val;
}

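/**
 * dwc2_set_param_ahbcfg() - Set the GAHBCFG parameter
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @val:   Raw GAHBCFG value to program, or -1 to use the default
 *         (INCR4 AHB burst length)
 *
 * Unlike the other setters, this value is taken as-is; no validation is
 * done against the hardware configuration.
 */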
void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
{
	if (val != -1)
		hsotg->core_params->ahbcfg = val;
	else
		hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
						GAHBCFG_HBSTLEN_SHIFT;
}

void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter otg_ver\n", val);
			dev_err(hsotg->dev,
				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
	}

	hsotg->core_params->otg_ver = val;
}

static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter uframe_sched\n",
				val);
			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
		}
		val = 1;
		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
	}

	hsotg->core_params->uframe_sched = val;
}

static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
					       int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter external_id_pin_ctl\n",
				val);
			dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
	}

	hsotg->core_params->external_id_pin_ctl = val;
}

static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
				       int val)
{
	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
		if (val >= 0) {
			dev_err(hsotg->dev,
				"'%d' invalid for parameter hibernation\n",
				val);
			dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
		}
		val = 0;
		dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
	}

	hsotg->core_params->hibernation = val;
}

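/*
 * Typical usage (illustrative sketch, not taken from a particular glue
 * driver): fill a struct dwc2_core_params, using -1 for any field that
 * should be auto-detected from the hardware, then hand the structure to
 * dwc2_set_parameters() before the core is initialized:
 *
 *	struct dwc2_core_params defaults;
 *
 *	dwc2_set_all_params(&defaults, -1);
 *	defaults.speed = DWC2_SPEED_PARAM_FULL;
 *	dwc2_set_parameters(hsotg, &defaults);
 */
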
/*
 * This function is called during module initialization to pass module
 * parameters for the DWC_otg core.
 */
void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
			 const struct dwc2_core_params *params)
{
	dev_dbg(hsotg->dev, "%s()\n", __func__);

	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
			params->host_support_fs_ls_low_power);
	dwc2_set_param_enable_dynamic_fifo(hsotg,
			params->enable_dynamic_fifo);
	dwc2_set_param_host_rx_fifo_size(hsotg,
			params->host_rx_fifo_size);
	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
			params->host_nperio_tx_fifo_size);
	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
			params->host_perio_tx_fifo_size);
	dwc2_set_param_max_transfer_size(hsotg,
			params->max_transfer_size);
	dwc2_set_param_max_packet_count(hsotg,
			params->max_packet_count);
	dwc2_set_param_host_channels(hsotg, params->host_channels);
	dwc2_set_param_phy_type(hsotg, params->phy_type);
	dwc2_set_param_speed(hsotg, params->speed);
	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
			params->host_ls_low_power_phy_clk);
	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
			params->phy_ulpi_ext_vbus);
	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
	dwc2_set_param_en_multiple_tx_fifo(hsotg,
			params->en_multiple_tx_fifo);
	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
	dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
	dwc2_set_param_hibernation(hsotg, params->hibernation);
}

/**
 * dwc2_get_hwparams() - During device initialization, read various hardware
 * configuration registers and interpret the contents.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	unsigned int width;
	u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
	u32 hptxfsiz, grxfsiz, gnptxfsiz;
	u32 gusbcfg;

	/*
	 * Attempt to ensure this device is really a DWC_otg Controller.
	 * Read and verify the GSNPSID register contents. The value should be
	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or
	 * "OT3", as in "OTG version 2.xx" or "OTG version 3.xx".
	 */
	hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
			hw->snpsid);
		return -ENODEV;
	}

	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);

	hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
	hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
	hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
	hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
	grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);

	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);

	/* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */
	gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	gusbcfg |= GUSBCFG_FORCEHOSTMODE;
	dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
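	/*
	 * Give the core time to actually switch to host mode before the
	 * host-mode-only FIFO size registers are read back below.
	 */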
	usleep_range(100000, 150000);

	gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
	hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
	gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
	dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
	usleep_range(100000, 150000);

	/* hwcfg2 */
	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
		      GHWCFG2_OP_MODE_SHIFT;
	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
		   GHWCFG2_ARCHITECTURE_SHIFT;
	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
				 GHWCFG2_NUM_HOST_CHAN_SHIFT);
	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
			  GHWCFG2_HS_PHY_TYPE_SHIFT;
	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
			  GHWCFG2_FS_PHY_TYPE_SHIFT;
	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
			 GHWCFG2_NUM_DEV_EP_SHIFT;
	hw->nperio_tx_q_depth =
		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
	hw->host_perio_tx_q_depth =
		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
	hw->dev_token_q_depth =
		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;

	/* hwcfg3 */
	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
	hw->max_transfer_size = (1 << (width + 11)) - 1;
	/*
	 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
	 * coherent buffers with this size, and if it's too large we can
	 * exhaust the coherent DMA pool.
	 */
	if (hw->max_transfer_size > 65535)
		hw->max_transfer_size = 65535;
	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
	hw->max_packet_count = (1 << (width + 4)) - 1;
	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
			      GHWCFG3_DFIFO_DEPTH_SHIFT;

	/* hwcfg4 */
	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;

	/* fifo sizes */
	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
				GRXFSIZ_DEPTH_SHIFT;
	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
				       FIFOSIZE_DEPTH_SHIFT;
	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
				      FIFOSIZE_DEPTH_SHIFT;

	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
	dev_dbg(hsotg->dev, "  op_mode=%d\n",
		hw->op_mode);
	dev_dbg(hsotg->dev, "  arch=%d\n",
		hw->arch);
	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
		hw->dma_desc_enable);
	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
		hw->power_optimized);
	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
		hw->i2c_enable);
	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
		hw->hs_phy_type);
	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
		hw->fs_phy_type);
	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
		hw->utmi_phy_data_width);
	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
		hw->num_dev_ep);
	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
		hw->num_dev_perio_in_ep);
	dev_dbg(hsotg->dev, "  host_channels=%d\n",
		hw->host_channels);
	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
		hw->max_transfer_size);
	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
		hw->max_packet_count);
	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
		hw->nperio_tx_q_depth);
	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
		hw->host_perio_tx_q_depth);
	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
		hw->dev_token_q_depth);
	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
		hw->enable_dynamic_fifo);
	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
		hw->en_multiple_tx_fifo);
	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
		hw->total_fifo_size);
	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
		hw->host_rx_fifo_size);
	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
		hw->host_nperio_tx_fifo_size);
	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
		hw->host_perio_tx_fifo_size);
	dev_dbg(hsotg->dev, "\n");

	return 0;
}

/*
 * Sets all parameters to the given value.
 *
 * Assumes that the dwc2_core_params struct contains only integers.
 */
void dwc2_set_all_params(struct dwc2_core_params *params, int value)
{
	int *p = (int *)params;
	size_t size = sizeof(*params) / sizeof(*p);
	int i;

	for (i = 0; i < size; i++)
		p[i] = value;
}

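/**
 * dwc2_get_otg_version() - Report the configured OTG version in BCD form
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Returns 0x0200 (OTG 2.0) when the otg_ver core parameter is 1, otherwise
 * 0x0103 (OTG 1.3).
 */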
u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
{
	return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
}

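/**
 * dwc2_is_controller_alive() - Check that the controller is still reachable
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * A read of all ones from GSNPSID is treated as the register interface no
 * longer responding (for example, the controller lost power or clock).
 */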
bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
{
	if (dwc2_readl(hsotg->regs + GSNPSID) == 0xffffffff)
		return false;
	else
		return true;
}

/**
 * dwc2_enable_global_interrupts() - Enables the controller's Global
 * Interrupt in the AHB Config register
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);

	ahbcfg |= GAHBCFG_GLBL_INTR_EN;
	dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
}

/**
 * dwc2_disable_global_interrupts() - Disables the controller's Global
 * Interrupt in the AHB Config register
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);

	ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
	dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
}

MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
MODULE_AUTHOR("Synopsys, Inc.");
MODULE_LICENSE("Dual BSD/GPL");