/*
 * core.c - DesignWare HS OTG Controller common routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The Core code provides basic services for accessing and managing the
 * DWC_otg hardware. These services are used by both the Host Controller
 * Driver and the Peripheral Controller Driver.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_host_registers() - Backup controller host registers.
 * When suspending the USB bus, registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup Host regs */
	hr = &hsotg->hr_backup;
	hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
	hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));

	hr->hprt0 = dwc2_read_hprt0(hsotg);
	hr->hfir = dwc2_readl(hsotg->regs + HFIR);
	hr->valid = true;

	return 0;
}

/**
 * dwc2_restore_host_registers() - Restore controller host registers.
 * When resuming the USB bus, host registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore host regs */
	hr = &hsotg->hr_backup;
	if (!hr->valid) {
		dev_err(hsotg->dev, "%s: no host registers to restore\n",
			__func__);
		return -EINVAL;
	}
	hr->valid = false;

	dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
	dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);

	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));

	dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
	dwc2_writel(hr->hfir, hsotg->regs + HFIR);
	hsotg->frame_number = 0;

	return 0;
}
#else
static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif

#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_device_registers() - Backup controller device registers.
 * When suspending the USB bus, registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup dev regs */
	dr = &hsotg->dr_backup;

	dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
	dr->dctl = dwc2_readl(hsotg->regs + DCTL);
	dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
	dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
	dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Backup IN EPs */
		dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->diepctl[i] & DXEPCTL_DPID)
			dr->diepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->diepctl[i] |= DXEPCTL_SETD0PID;

		dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
		dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));

		/* Backup OUT EPs */
		dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->doepctl[i] & DXEPCTL_DPID)
			dr->doepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->doepctl[i] |= DXEPCTL_SETD0PID;

		dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
		dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
	}
	dr->valid = true;
	return 0;
}

/**
 * dwc2_restore_device_registers() - Restore controller device registers.
 * When resuming the USB bus, device registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	u32 dctl;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore dev regs */
	dr = &hsotg->dr_backup;
	if (!dr->valid) {
		dev_err(hsotg->dev, "%s: no device registers to restore\n",
			__func__);
		return -EINVAL;
	}
	dr->valid = false;

	dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
	dwc2_writel(dr->dctl, hsotg->regs + DCTL);
	dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
	dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
	dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Restore IN EPs */
		dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
		dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
		dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));

		/* Restore OUT EPs */
		dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
		dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
		dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
	}

	/* Set the Power-On Programming done bit */
	dctl = dwc2_readl(hsotg->regs + DCTL);
	dctl |= DCTL_PWRONPRGDONE;
	dwc2_writel(dctl, hsotg->regs + DCTL);

	return 0;
}
#else
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif

/**
 * dwc2_backup_global_registers() - Backup global controller registers.
 * When suspending the USB bus, registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	/* Backup global regs */
	gr = &hsotg->gr_backup;

	gr->gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	gr->gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
	gr->gahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
	gr->gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	gr->grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
	gr->gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
	gr->hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
	gr->gdfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		gr->dtxfsiz[i] = dwc2_readl(hsotg->regs + DPTXFSIZN(i));

	gr->valid = true;
	return 0;
}

/**
 * dwc2_restore_global_registers() - Restore controller global registers.
 * When resuming the USB bus, global registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore global regs */
	gr = &hsotg->gr_backup;
	if (!gr->valid) {
		dev_err(hsotg->dev, "%s: no global registers to restore\n",
			__func__);
		return -EINVAL;
	}
	gr->valid = false;

	dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
	dwc2_writel(gr->gotgctl, hsotg->regs + GOTGCTL);
	dwc2_writel(gr->gintmsk, hsotg->regs + GINTMSK);
	dwc2_writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
	dwc2_writel(gr->gahbcfg, hsotg->regs + GAHBCFG);
	dwc2_writel(gr->grxfsiz, hsotg->regs + GRXFSIZ);
	dwc2_writel(gr->gnptxfsiz, hsotg->regs + GNPTXFSIZ);
	dwc2_writel(gr->hptxfsiz, hsotg->regs + HPTXFSIZ);
	dwc2_writel(gr->gdfifocfg, hsotg->regs + GDFIFOCFG);
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		dwc2_writel(gr->dtxfsiz[i], hsotg->regs + DPTXFSIZN(i));

	return 0;
}

/**
 * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @restore: Controller registers need to be restored
 */
int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
{
	u32 pcgcctl;
	int ret = 0;

	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_STOPPCLK;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_PWRCLMP;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	udelay(100);
	if (restore) {
		ret = dwc2_restore_global_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to restore registers\n",
				__func__);
			return ret;
		}
		if (dwc2_is_host_mode(hsotg)) {
			ret = dwc2_restore_host_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore host registers\n",
					__func__);
				return ret;
			}
		} else {
			ret = dwc2_restore_device_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore device registers\n",
					__func__);
				return ret;
			}
		}
	}

	return ret;
}

/**
 * dwc2_enter_hibernation() - Put controller in Partial Power Down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
{
	u32 pcgcctl;
	int ret = 0;

	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	/* Backup all registers */
	ret = dwc2_backup_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
			__func__);
		return ret;
	}

	if (dwc2_is_host_mode(hsotg)) {
		ret = dwc2_backup_host_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup host registers\n",
				__func__);
			return ret;
		}
	} else {
		ret = dwc2_backup_device_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup device registers\n",
				__func__);
			return ret;
		}
	}

	/* Put the controller in low power state */
	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);

	pcgcctl |= PCGCTL_PWRCLMP;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
	ndelay(20);

	pcgcctl |= PCGCTL_RSTPDWNMODULE;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
	ndelay(20);

	pcgcctl |= PCGCTL_STOPPCLK;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	return ret;
}

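/*
 * Usage sketch (illustrative only, not part of this file): a platform
 * suspend/resume path could pair the two helpers above roughly as follows,
 * assuming the "hibernation" core parameter is enabled. The wrapper names
 * are hypothetical; only the dwc2_* calls are defined in this driver.
 *
 *	static int example_suspend(struct dwc2_hsotg *hsotg)
 *	{
 *		int ret;
 *
 *		// Backs up global + host/device registers, then gates
 *		// power/clocks via PCGCTL.
 *		ret = dwc2_enter_hibernation(hsotg);
 *		if (ret == -ENOTSUPP)
 *			return 0;	// hibernation not enabled
 *		return ret;
 *	}
 *
 *	static int example_resume(struct dwc2_hsotg *hsotg)
 *	{
 *		// "true" asks for the backed-up registers to be restored.
 *		return dwc2_exit_hibernation(hsotg, true);
 *	}
 */
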
/**
 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
 * used in both device and host modes
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	/* Clear any pending OTG Interrupts */
	dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);

	/* Clear any pending interrupts */
	dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);

	/* Enable the interrupts in the GINTMSK */
	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;

	if (hsotg->core_params->dma_enable <= 0)
		intmsk |= GINTSTS_RXFLVL;
	if (hsotg->core_params->external_id_pin_ctl <= 0)
		intmsk |= GINTSTS_CONIDSTSCHNG;

	intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
		  GINTSTS_SESSREQINT;

	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * Initializes the FSLSPClkSel field of the HCFG register depending on the
 * PHY type
 */
static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, val;

	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	     hsotg->core_params->ulpi_fs_ls > 0) ||
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* Full speed PHY */
		val = HCFG_FSLSPCLKSEL_48_MHZ;
	} else {
		/* High speed PHY running at full speed or high speed */
		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
	}

	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
	hcfg = dwc2_readl(hsotg->regs + HCFG);
	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
	dwc2_writel(hcfg, hsotg->regs + HCFG);
}

/*
 * Do a soft reset of the core. Be careful with this because it
 * resets all the internal state machines of the core.
 */
static int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;
	u32 gusbcfg;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Wait for AHB master IDLE state */
	do {
		usleep_range(20000, 40000);
		greset = dwc2_readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (!(greset & GRSTCTL_AHBIDLE));

	/* Core Soft Reset */
	count = 0;
	greset |= GRSTCTL_CSFTRST;
	dwc2_writel(greset, hsotg->regs + GRSTCTL);
	do {
		usleep_range(20000, 40000);
		greset = dwc2_readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (greset & GRSTCTL_CSFTRST);

	if (hsotg->dr_mode == USB_DR_MODE_HOST) {
		gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
		gusbcfg |= GUSBCFG_FORCEHOSTMODE;
		dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
	} else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
		gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
		gusbcfg |= GUSBCFG_FORCEDEVMODE;
		dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
	} else if (hsotg->dr_mode == USB_DR_MODE_OTG) {
		gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
		gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
		dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
	}

	/*
	 * NOTE: This long sleep is _very_ important, otherwise the core will
	 * not stay in host mode after a connector ID change!
	 */
	usleep_range(150000, 200000);

	return 0;
}

static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, i2cctl;
	int retval = 0;

	/*
	 * core_init() is now called on every switch so only call the
	 * following for the first time through
	 */
	if (select_phy) {
		dev_dbg(hsotg->dev, "FS PHY selected\n");
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_PHYSEL;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Reset after a PHY select */
		retval = dwc2_core_reset(hsotg);
		if (retval) {
			dev_err(hsotg->dev, "%s() Reset failed, aborting",
				__func__);
			return retval;
		}
	}

	/*
	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
	 * do this on HNP Dev/Host mode switches (done in dev_init and
	 * host_init).
	 */
	if (dwc2_is_host_mode(hsotg))
		dwc2_init_fs_ls_pclk_sel(hsotg);

	if (hsotg->core_params->i2c_enable > 0) {
		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");

		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Program GI2CCTL.I2CEn */
		i2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
		i2cctl &= ~GI2CCTL_I2CEN;
		dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
		i2cctl |= GI2CCTL_I2CEN;
		dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
	}

	return retval;
}

static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg;
	int retval = 0;

	if (!select_phy)
		return 0;

	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);

	/*
	 * HS PHY parameters. These parameters are preserved during soft reset
	 * so only program the first time. Do a soft reset immediately after
	 * setting phyif.
	 */
	switch (hsotg->core_params->phy_type) {
	case DWC2_PHY_TYPE_PARAM_ULPI:
		/* ULPI interface */
		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
		if (hsotg->core_params->phy_ulpi_ddr > 0)
			usbcfg |= GUSBCFG_DDRSEL;
		break;
	case DWC2_PHY_TYPE_PARAM_UTMI:
		/* UTMI+ interface */
		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
		if (hsotg->core_params->phy_utmi_width == 16)
			usbcfg |= GUSBCFG_PHYIF16;
		break;
	default:
		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
		break;
	}

	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

	/* Reset after setting the PHY parameters */
	retval = dwc2_core_reset(hsotg);
	if (retval) {
		dev_err(hsotg->dev, "%s() Reset failed, aborting",
			__func__);
		return retval;
	}

	return retval;
}

static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg;
	int retval = 0;

	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* If FS mode with FS PHY */
		retval = dwc2_fs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	} else {
		/* High speed PHY */
		retval = dwc2_hs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	}

	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	    hsotg->core_params->ulpi_fs_ls > 0) {
		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_ULPI_FS_LS;
		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
	} else {
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
	}

	return retval;
}

static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);

	switch (hsotg->hw_params.arch) {
	case GHWCFG2_EXT_DMA_ARCH:
		dev_err(hsotg->dev, "External DMA Mode not supported\n");
		return -EINVAL;

	case GHWCFG2_INT_DMA_ARCH:
		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
		if (hsotg->core_params->ahbcfg != -1) {
			ahbcfg &= GAHBCFG_CTRL_MASK;
			ahbcfg |= hsotg->core_params->ahbcfg &
				  ~GAHBCFG_CTRL_MASK;
		}
		break;

	case GHWCFG2_SLAVE_ONLY_ARCH:
	default:
		dev_dbg(hsotg->dev, "Slave Only Mode\n");
		break;
	}

	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
		hsotg->core_params->dma_enable,
		hsotg->core_params->dma_desc_enable);

	if (hsotg->core_params->dma_enable > 0) {
		if (hsotg->core_params->dma_desc_enable > 0)
			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
		else
			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
	} else {
		dev_dbg(hsotg->dev, "Using Slave mode\n");
		hsotg->core_params->dma_desc_enable = 0;
	}

	if (hsotg->core_params->dma_enable > 0)
		ahbcfg |= GAHBCFG_DMA_EN;

	dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);

	return 0;
}

static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;

	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);

	switch (hsotg->hw_params.op_mode) {
	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
		if (hsotg->core_params->otg_cap ==
				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_HNPCAP;
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
	default:
		break;
	}

	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
}

/**
 * dwc2_core_init() - Initializes the DWC_otg controller registers and
 * prepares the core for device mode or host mode operation
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @select_phy: If true then also set the Phy type
 * @irq: If >= 0, the irq to register
 */
int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
{
	u32 usbcfg, otgctl;
	int retval;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);

	/* Set ULPI External VBUS bit if needed */
	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
	if (hsotg->core_params->phy_ulpi_ext_vbus ==
				DWC2_PHY_ULPI_EXTERNAL_VBUS)
		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;

	/* Set external TS Dline pulsing bit if needed */
	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
	if (hsotg->core_params->ts_dline > 0)
		usbcfg |= GUSBCFG_TERMSELDLPULSE;

	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

	/* Reset the Controller */
	retval = dwc2_core_reset(hsotg);
	if (retval) {
		dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
			__func__);
		return retval;
	}

	/*
	 * This needs to happen in FS mode before any other programming occurs
	 */
	retval = dwc2_phy_init(hsotg, select_phy);
	if (retval)
		return retval;

	/* Program the GAHBCFG Register */
	retval = dwc2_gahbcfg_init(hsotg);
	if (retval)
		return retval;

	/* Program the GUSBCFG register */
	dwc2_gusbcfg_init(hsotg);

	/* Program the GOTGCTL register */
	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_OTGVER;
	if (hsotg->core_params->otg_ver > 0)
		otgctl |= GOTGCTL_OTGVER;
	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);

	/* Clear the SRP success bit for FS-I2c */
	hsotg->srp_success = 0;

	/* Enable common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/*
	 * Do device or host initialization based on mode during PCD and
	 * HCD initialization
	 */
	if (dwc2_is_host_mode(hsotg)) {
		dev_dbg(hsotg->dev, "Host Mode\n");
		hsotg->op_state = OTG_STATE_A_HOST;
	} else {
		dev_dbg(hsotg->dev, "Device Mode\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
	}

	return 0;
}

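/*
 * Usage sketch (illustrative only): a probe or mode-switch path typically
 * resets and initializes the core once the register map and core parameters
 * are known, then continues with host-specific setup when in host mode. The
 * surrounding variable names are assumptions, not part of this driver:
 *
 *	retval = dwc2_core_init(hsotg, true, irq);	// select_phy on first init
 *	if (retval)
 *		return retval;
 *	if (dwc2_is_host_mode(hsotg))
 *		dwc2_core_host_init(hsotg);
 */
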
/**
 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	/* Disable all interrupts */
	dwc2_writel(0, hsotg->regs + GINTMSK);
	dwc2_writel(0, hsotg->regs + HAINTMSK);

	/* Enable the common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/* Enable host mode interrupts without disturbing common interrupts */
	intmsk = dwc2_readl(hsotg->regs + GINTMSK);
	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT |
		  GINTSTS_DISCONNINT;
	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
}

/**
 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK);

	/* Disable host mode interrupts without disturbing common interrupts */
	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
 * for systems that have a total fifo depth smaller than the default
 * RX + TX fifo size.
 *
 * @hsotg: Programming view of DWC_otg controller
 */
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;

	total_fifo_size = hw->total_fifo_size;
	rxfsiz = params->host_rx_fifo_size;
	nptxfsiz = params->host_nperio_tx_fifo_size;
	ptxfsiz = params->host_perio_tx_fifo_size;

	/*
	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
	 * allocation with support for high bandwidth endpoints. Synopsys
	 * defines MPS(Max Packet size) for a periodic EP=1024, and for
	 * non-periodic as 512.
	 */
	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
		/*
		 * For Buffer DMA mode/Scatter Gather DMA mode
		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
		 * with n = number of host channel.
		 * 2 * ((1024/4) + 2) = 516
		 */
		rxfsiz = 516 + hw->host_channels;

		/*
		 * min non-periodic tx fifo depth
		 * 2 * (largest non-periodic USB packet used / 4)
		 * 2 * (512/4) = 256
		 */
		nptxfsiz = 256;

		/*
		 * min periodic tx fifo depth
		 * (largest packet size*MC)/4
		 * (1024 * 3)/4 = 768
		 */
		ptxfsiz = 768;

		params->host_rx_fifo_size = rxfsiz;
		params->host_nperio_tx_fifo_size = nptxfsiz;
		params->host_perio_tx_fifo_size = ptxfsiz;
	}

	/*
	 * If the summation of RX, NPTX and PTX fifo sizes is still
	 * bigger than the total_fifo_size, then we have a problem.
	 *
	 * We won't be able to allocate as many endpoints. Right now,
	 * we're just printing an error message, but ideally this FIFO
	 * allocation algorithm would be improved in the future.
	 *
	 * FIXME improve this FIFO allocation algorithm.
	 */
	if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
		dev_err(hsotg->dev, "invalid fifo sizes\n");
}

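/*
 * Worked example of the Method 2 fallback above (illustrative numbers): on a
 * core with 8 host channels whose total_fifo_size is smaller than the default
 * RX + TX sizes, the function would pick
 *
 *	rxfsiz   = 516 + 8 = 524	(2 * ((1024 / 4) + 2) + host channels)
 *	nptxfsiz = 256			(2 * (512 / 4))
 *	ptxfsiz  = 768			((1024 * 3) / 4)
 *
 * i.e. 1548 FIFO entries in total; if the hardware still has less than that,
 * the dev_err() in dwc2_calculate_dynamic_fifo() reports the mismatch.
 */
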
static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;

	if (!params->enable_dynamic_fifo)
		return;

	dwc2_calculate_dynamic_fifo(hsotg);

	/* Rx FIFO */
	grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
	grxfsiz |= params->host_rx_fifo_size <<
		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
	dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + GRXFSIZ));

	/* Non-periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + GNPTXFSIZ));
	nptxfsiz = params->host_nperio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	nptxfsiz |= params->host_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + GNPTXFSIZ));

	/* Periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + HPTXFSIZ));
	hptxfsiz = params->host_perio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	hptxfsiz |= (params->host_rx_fifo_size +
		     params->host_nperio_tx_fifo_size) <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + HPTXFSIZ));

	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
		/*
		 * Global DFIFOCFG calculation for Host mode -
		 * include RxFIFO, NPTXFIFO and HPTXFIFO
		 */
		dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
		dfifocfg |= (params->host_rx_fifo_size +
			     params->host_nperio_tx_fifo_size +
			     params->host_perio_tx_fifo_size) <<
			    GDFIFOCFG_EPINFOBASE_SHIFT &
			    GDFIFOCFG_EPINFOBASE_MASK;
		dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG);
	}
}

/**
 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
 * Host mode
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
 * request queues. Host channels are reset to ensure that they are ready for
 * performing transfers.
 */
void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, hfir, otgctl;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Restart the Phy Clock */
	dwc2_writel(0, hsotg->regs + PCGCTL);

	/* Initialize Host Configuration Register */
	dwc2_init_fs_ls_pclk_sel(hsotg);
	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
		hcfg = dwc2_readl(hsotg->regs + HCFG);
		hcfg |= HCFG_FSLSSUPP;
		dwc2_writel(hcfg, hsotg->regs + HCFG);
	}

	/*
	 * This bit allows dynamic reloading of the HFIR register during
	 * runtime. This bit needs to be programmed during initial configuration
	 * and its value must not be changed during runtime.
	 */
	if (hsotg->core_params->reload_ctl > 0) {
		hfir = dwc2_readl(hsotg->regs + HFIR);
		hfir |= HFIR_RLDCTRL;
		dwc2_writel(hfir, hsotg->regs + HFIR);
	}

	if (hsotg->core_params->dma_desc_enable > 0) {
		u32 op_mode = hsotg->hw_params.op_mode;

		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
		    !hsotg->hw_params.dma_desc_enable ||
		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
			dev_err(hsotg->dev,
				"Hardware does not support descriptor DMA mode -\n");
			dev_err(hsotg->dev,
				"falling back to buffer DMA mode.\n");
			hsotg->core_params->dma_desc_enable = 0;
		} else {
			hcfg = dwc2_readl(hsotg->regs + HCFG);
			hcfg |= HCFG_DESCDMA;
			dwc2_writel(hcfg, hsotg->regs + HCFG);
		}
	}

	/* Configure data FIFO sizes */
	dwc2_config_fifos(hsotg);

	/* TODO - check this */
	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);

	/* Make sure the FIFOs are flushed */
	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
	dwc2_flush_rx_fifo(hsotg);

	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);

	if (hsotg->core_params->dma_desc_enable <= 0) {
		int num_channels, i;
		u32 hcchar;

		/* Flush out any leftover queued requests */
		num_channels = hsotg->core_params->host_channels;
		for (i = 0; i < num_channels; i++) {
			hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
			hcchar &= ~HCCHAR_CHENA;
			hcchar |= HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
		}

		/* Halt all channels to put them into a known state */
		for (i = 0; i < num_channels; i++) {
			int count = 0;

			hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
				__func__, i);
			do {
				hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
				if (++count > 1000) {
					dev_err(hsotg->dev,
						"Unable to clear enable on channel %d\n",
						i);
					break;
				}
				udelay(1);
			} while (hcchar & HCCHAR_CHENA);
		}
	}

	/* Turn on the vbus power */
	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
	if (hsotg->op_state == OTG_STATE_A_HOST) {
		u32 hprt0 = dwc2_read_hprt0(hsotg);

		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
			!!(hprt0 & HPRT0_PWR));
		if (!(hprt0 & HPRT0_PWR)) {
			hprt0 |= HPRT0_PWR;
			dwc2_writel(hprt0, hsotg->regs + HPRT0);
		}
	}

	dwc2_enable_host_interrupts(hsotg);
}

static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, "control/bulk\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_BBLERR;
		} else {
			hcintmsk |= HCINTMSK_NAK;
			hcintmsk |= HCINTMSK_NYET;
			if (chan->do_ping)
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->do_split) {
			hcintmsk |= HCINTMSK_NAK;
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "intr\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_NAK;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		hcintmsk |= HCINTMSK_FRMOVRUN;

		if (chan->ep_is_in)
			hcintmsk |= HCINTMSK_BBLERR;
		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		if (chan->do_split) {
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "isoc\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_FRMOVRUN;
		hcintmsk |= HCINTMSK_ACK;

		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_XACTERR;
			hcintmsk |= HCINTMSK_BBLERR;
		}
		break;
	default:
		dev_err(hsotg->dev, "## Unknown EP type ##\n");
		break;
	}

	dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	/*
	 * For Descriptor DMA mode core halts the channel on AHB error.
	 * Interrupt is not required.
	 */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcintmsk |= HCINTMSK_AHBERR;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			hcintmsk |= HCINTMSK_XFERCOMPL;
	}

	if (chan->error_state && !chan->do_split &&
	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "setting ACK\n");
		hcintmsk |= HCINTMSK_ACK;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_DATATGLERR;
			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
				hcintmsk |= HCINTMSK_NAK;
		}
	}

	dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan)
{
	u32 intmsk;

	if (hsotg->core_params->dma_enable > 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_hc_enable_dma_ints(hsotg, chan);
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA disabled\n");
		dwc2_hc_enable_slave_ints(hsotg, chan);
	}

	/* Enable the top level host channel interrupt */
	intmsk = dwc2_readl(hsotg->regs + HAINTMSK);
	intmsk |= 1 << chan->hc_num;
	dwc2_writel(intmsk, hsotg->regs + HAINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);

	/* Make sure host channel interrupts are enabled */
	intmsk = dwc2_readl(hsotg->regs + GINTMSK);
	intmsk |= GINTSTS_HCHINT;
	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
}

1279/**
1280 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
1281 * a specific endpoint
1282 *
1283 * @hsotg: Programming view of DWC_otg controller
1284 * @chan: Information needed to initialize the host channel
1285 *
1286 * The HCCHARn register is set up with the characteristics specified in chan.
1287 * Host channel interrupts that may need to be serviced while this transfer is
1288 * in progress are enabled.
1289 */
1290void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1291{
1292 u8 hc_num = chan->hc_num;
1293 u32 hcintmsk;
1294 u32 hcchar;
1295 u32 hcsplt = 0;
1296
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001297 if (dbg_hc(chan))
1298 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001299
1300 /* Clear old interrupt conditions for this host channel */
1301 hcintmsk = 0xffffffff;
1302 hcintmsk &= ~HCINTMSK_RESERVED14_31;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001303 dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001304
1305 /* Enable channel interrupts required for this transfer */
1306 dwc2_hc_enable_ints(hsotg, chan);
1307
1308 /*
1309 * Program the HCCHARn register with the endpoint characteristics for
1310 * the current transfer
1311 */
1312 hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
1313 hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
1314 if (chan->ep_is_in)
1315 hcchar |= HCCHAR_EPDIR;
1316 if (chan->speed == USB_SPEED_LOW)
1317 hcchar |= HCCHAR_LSPDDEV;
1318 hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
1319 hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001320 dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001321 if (dbg_hc(chan)) {
1322 dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
1323 hc_num, hcchar);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001324
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001325 dev_vdbg(hsotg->dev, "%s: Channel %d\n",
1326 __func__, hc_num);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001327 dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001328 chan->dev_addr);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001329 dev_vdbg(hsotg->dev, " Ep Num: %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001330 chan->ep_num);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001331 dev_vdbg(hsotg->dev, " Is In: %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001332 chan->ep_is_in);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001333 dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001334 chan->speed == USB_SPEED_LOW);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001335 dev_vdbg(hsotg->dev, " Ep Type: %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001336 chan->ep_type);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001337 dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001338 chan->max_packet);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001339 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001340
1341 /* Program the HCSPLT register for SPLITs */
1342 if (chan->do_split) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001343 if (dbg_hc(chan))
1344 dev_vdbg(hsotg->dev,
1345 "Programming HC %d with split --> %s\n",
1346 hc_num,
1347 chan->complete_split ? "CSPLIT" : "SSPLIT");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001348 if (chan->complete_split)
1349 hcsplt |= HCSPLT_COMPSPLT;
1350 hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
1351 HCSPLT_XACTPOS_MASK;
1352 hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
1353 HCSPLT_HUBADDR_MASK;
1354 hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
1355 HCSPLT_PRTADDR_MASK;
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001356 if (dbg_hc(chan)) {
1357 dev_vdbg(hsotg->dev, " comp split %d\n",
1358 chan->complete_split);
1359 dev_vdbg(hsotg->dev, " xact pos %d\n",
1360 chan->xact_pos);
1361 dev_vdbg(hsotg->dev, " hub addr %d\n",
1362 chan->hub_addr);
1363 dev_vdbg(hsotg->dev, " hub port %d\n",
1364 chan->hub_port);
1365 dev_vdbg(hsotg->dev, " is_in %d\n",
1366 chan->ep_is_in);
1367 dev_vdbg(hsotg->dev, " Max Pkt %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001368 chan->max_packet);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001369 dev_vdbg(hsotg->dev, " xferlen %d\n",
1370 chan->xfer_len);
1371 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001372 }
1373
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001374 dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001375}
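/*
 * Minimal usage sketch for dwc2_hc_init() (illustrative only, with made-up
 * values; the HCD normally fills the channel from its QH/QTD state before
 * calling this). For a bulk IN endpoint on a high-speed device at address 5:
 *
 *	chan->hc_num     = 0;
 *	chan->dev_addr   = 5;
 *	chan->ep_num     = 1;
 *	chan->ep_is_in   = 1;
 *	chan->speed      = USB_SPEED_HIGH;
 *	chan->ep_type    = USB_ENDPOINT_XFER_BULK;
 *	chan->max_packet = 512;
 *	chan->do_split   = 0;
 *	dwc2_hc_init(hsotg, chan);
 *
 * After this, HCCHAR(0) holds the endpoint characteristics and the channel
 * interrupts are unmasked; the transfer itself is started separately with
 * dwc2_hc_start_transfer().
 */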
1376
1377/**
1378 * dwc2_hc_halt() - Attempts to halt a host channel
1379 *
1380 * @hsotg: Controller register interface
1381 * @chan: Host channel to halt
1382 * @halt_status: Reason for halting the channel
1383 *
1384 * This function should only be called in Slave mode or to abort a transfer in
1385 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
1386 * controller halts the channel when the transfer is complete or a condition
1387 * occurs that requires application intervention.
1388 *
1389 * In Slave mode, checks for a free request queue entry, then sets the Channel
1390 * Enable and Channel Disable bits of the Host Channel Characteristics
1391 * register of the specified channel to initiate the halt. If there is no free
1392 * request queue entry, sets only the Channel Disable bit of the HCCHARn
1393 * register to flush requests for this channel. In the latter case, sets a
1394 * flag to indicate that the host channel needs to be halted when a request
1395 * queue slot is open.
1396 *
1397 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1398 * HCCHARn register. The controller ensures there is space in the request
1399 * queue before submitting the halt request.
1400 *
1401 * Some time may elapse before the core flushes any posted requests for this
1402 * host channel and halts. The Channel Halted interrupt handler completes the
1403 * deactivation of the host channel.
1404 */
1405void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
1406 enum dwc2_halt_status halt_status)
1407{
1408 u32 nptxsts, hptxsts, hcchar;
1409
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001410 if (dbg_hc(chan))
1411 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001412 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
1413 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
1414
1415 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1416 halt_status == DWC2_HC_XFER_AHB_ERR) {
1417 /*
1418 * Disable all channel interrupts except Ch Halted. The QTD
1419 * and QH state associated with this transfer has been cleared
1420 * (in the case of URB_DEQUEUE), so the channel needs to be
1421 * shut down carefully to prevent crashes.
1422 */
1423 u32 hcintmsk = HCINTMSK_CHHLTD;
1424
1425 dev_vdbg(hsotg->dev, "dequeue/error\n");
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001426 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001427
1428 /*
1429 * Make sure no other interrupts besides halt are currently
1430 * pending. Handling another interrupt could cause a crash due
1431 * to the QTD and QH state.
1432 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001433 dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001434
1435 /*
1436 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1437 * even if the channel was already halted for some other
1438 * reason
1439 */
1440 chan->halt_status = halt_status;
1441
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001442 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001443 if (!(hcchar & HCCHAR_CHENA)) {
1444 /*
1445 * The channel is either already halted or it hasn't
1446 * started yet. In DMA mode, the transfer may halt if
1447 * it finishes normally or a condition occurs that
1448 * requires driver intervention. Don't want to halt
1449 * the channel again. In either Slave or DMA mode,
1450 * it's possible that the transfer has been assigned
1451 * to a channel, but not started yet when an URB is
1452 * dequeued. Don't want to halt a channel that hasn't
1453 * started yet.
1454 */
1455 return;
1456 }
1457 }
1458 if (chan->halt_pending) {
1459 /*
1460 * A halt has already been issued for this channel. This might
1461 * happen when a transfer is aborted by a higher level in
1462 * the stack.
1463 */
1464 dev_vdbg(hsotg->dev,
1465 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1466 __func__, chan->hc_num);
1467 return;
1468 }
1469
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001470 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001471
1472 /* No need to set the bit in DDMA for disabling the channel */
1473 /* TODO check it everywhere channel is disabled */
1474 if (hsotg->core_params->dma_desc_enable <= 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001475 if (dbg_hc(chan))
1476 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001477 hcchar |= HCCHAR_CHENA;
1478 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001479 if (dbg_hc(chan))
1480 dev_dbg(hsotg->dev, "desc DMA enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001481 }
1482 hcchar |= HCCHAR_CHDIS;
1483
1484 if (hsotg->core_params->dma_enable <= 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001485 if (dbg_hc(chan))
1486 dev_vdbg(hsotg->dev, "DMA not enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001487 hcchar |= HCCHAR_CHENA;
1488
1489 /* Check for space in the request queue to issue the halt */
1490 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1491 chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1492 dev_vdbg(hsotg->dev, "control/bulk\n");
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001493 nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001494 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1495 dev_vdbg(hsotg->dev, "Disabling channel\n");
1496 hcchar &= ~HCCHAR_CHENA;
1497 }
1498 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001499 if (dbg_perio())
1500 dev_vdbg(hsotg->dev, "isoc/intr\n");
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001501 hptxsts = dwc2_readl(hsotg->regs + HPTXSTS);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001502 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1503 hsotg->queuing_high_bandwidth) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001504 if (dbg_perio())
1505 dev_vdbg(hsotg->dev, "Disabling channel\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001506 hcchar &= ~HCCHAR_CHENA;
1507 }
1508 }
1509 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001510 if (dbg_hc(chan))
1511 dev_vdbg(hsotg->dev, "DMA enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001512 }
1513
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001514 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001515 chan->halt_status = halt_status;
1516
1517 if (hcchar & HCCHAR_CHENA) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001518 if (dbg_hc(chan))
1519 dev_vdbg(hsotg->dev, "Channel enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001520 chan->halt_pending = 1;
1521 chan->halt_on_queue = 0;
1522 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001523 if (dbg_hc(chan))
1524 dev_vdbg(hsotg->dev, "Channel disabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001525 chan->halt_on_queue = 1;
1526 }
1527
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001528 if (dbg_hc(chan)) {
1529 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1530 chan->hc_num);
1531 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
1532 hcchar);
1533 dev_vdbg(hsotg->dev, " halt_pending: %d\n",
1534 chan->halt_pending);
1535 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
1536 chan->halt_on_queue);
1537 dev_vdbg(hsotg->dev, " halt_status: %d\n",
1538 chan->halt_status);
1539 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001540}
1541
1542/**
1543 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1544 *
1545 * @hsotg: Programming view of DWC_otg controller
1546 * @chan: Identifies the host channel to clean up
1547 *
1548 * This function is normally called after a transfer is done and the host
1549 * channel is being released
1550 */
1551void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1552{
1553 u32 hcintmsk;
1554
1555 chan->xfer_started = 0;
1556
1557 /*
1558 * Clear channel interrupt enables and any unhandled channel interrupt
1559 * conditions
1560 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001561 dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001562 hcintmsk = 0xffffffff;
1563 hcintmsk &= ~HCINTMSK_RESERVED14_31;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001564 dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001565}
1566
1567/**
1568 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1569 * which frame a periodic transfer should occur
1570 *
1571 * @hsotg: Programming view of DWC_otg controller
1572 * @chan: Identifies the host channel to set up and its properties
1573 * @hcchar: Current value of the HCCHAR register for the specified host channel
1574 *
1575 * This function has no effect on non-periodic transfers
1576 */
1577static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1578 struct dwc2_host_chan *chan, u32 *hcchar)
1579{
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001580 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1581 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001582		/* Set ODDFRM if the _next_ (micro)frame is odd, i.e. the current one is even */
Paul Zimmerman81a58952013-06-24 11:34:23 -07001583 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001584 *hcchar |= HCCHAR_ODDFRM;
1585 }
1586}
1587
1588static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1589{
1590 /* Set up the initial PID for the transfer */
1591 if (chan->speed == USB_SPEED_HIGH) {
1592 if (chan->ep_is_in) {
1593 if (chan->multi_count == 1)
1594 chan->data_pid_start = DWC2_HC_PID_DATA0;
1595 else if (chan->multi_count == 2)
1596 chan->data_pid_start = DWC2_HC_PID_DATA1;
1597 else
1598 chan->data_pid_start = DWC2_HC_PID_DATA2;
1599 } else {
1600 if (chan->multi_count == 1)
1601 chan->data_pid_start = DWC2_HC_PID_DATA0;
1602 else
1603 chan->data_pid_start = DWC2_HC_PID_MDATA;
1604 }
1605 } else {
1606 chan->data_pid_start = DWC2_HC_PID_DATA0;
1607 }
1608}
1609
1610/**
1611 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1612 * the Host Channel
1613 *
1614 * @hsotg: Programming view of DWC_otg controller
1615 * @chan: Information needed to initialize the host channel
1616 *
1617 * This function should only be called in Slave mode. For a channel associated
1618 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1619 * associated with a periodic EP, the periodic Tx FIFO is written.
1620 *
1621 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1622 * the number of bytes written to the Tx FIFO.
1623 */
1624static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1625 struct dwc2_host_chan *chan)
1626{
1627 u32 i;
1628 u32 remaining_count;
1629 u32 byte_count;
1630 u32 dword_count;
1631 u32 __iomem *data_fifo;
1632 u32 *data_buf = (u32 *)chan->xfer_buf;
1633
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001634 if (dbg_hc(chan))
1635 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001636
1637 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1638
1639 remaining_count = chan->xfer_len - chan->xfer_count;
1640 if (remaining_count > chan->max_packet)
1641 byte_count = chan->max_packet;
1642 else
1643 byte_count = remaining_count;
1644
1645 dword_count = (byte_count + 3) / 4;
1646
1647 if (((unsigned long)data_buf & 0x3) == 0) {
1648 /* xfer_buf is DWORD aligned */
1649 for (i = 0; i < dword_count; i++, data_buf++)
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001650 dwc2_writel(*data_buf, data_fifo);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001651 } else {
1652 /* xfer_buf is not DWORD aligned */
1653 for (i = 0; i < dword_count; i++, data_buf++) {
1654 u32 data = data_buf[0] | data_buf[1] << 8 |
1655 data_buf[2] << 16 | data_buf[3] << 24;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001656 dwc2_writel(data, data_fifo);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001657 }
1658 }
1659
1660 chan->xfer_count += byte_count;
1661 chan->xfer_buf += byte_count;
1662}
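/*
 * Worked example for the FIFO write above (informational): with 61 bytes
 * remaining and a 64-byte max packet, byte_count = 61 and
 * dword_count = (61 + 3) / 4 = 16, so 16 32-bit words are pushed into
 * HCFIFO(hc_num) and xfer_count/xfer_buf advance by 61 bytes. The last
 * word carries up to three bytes of padding beyond the programmed
 * transfer size.
 */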
1663
1664/**
1665 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1666 * channel and starts the transfer
1667 *
1668 * @hsotg: Programming view of DWC_otg controller
1669 * @chan: Information needed to initialize the host channel. The xfer_len value
1670 * may be reduced to accommodate the max widths of the XferSize and
1671 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1672 * changed to reflect the final xfer_len value.
1673 *
1674 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1675 * the caller must ensure that there is sufficient space in the request queue
1676 * and Tx Data FIFO.
1677 *
1678 * For an OUT transfer in Slave mode, it loads a data packet into the
1679 * appropriate FIFO. If necessary, additional data packets are loaded in the
1680 * Host ISR.
1681 *
1682 * For an IN transfer in Slave mode, a data packet is requested. The data
1683 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1684 * additional data packets are requested in the Host ISR.
1685 *
1686 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1687 * register along with a packet count of 1 and the channel is enabled. This
1688 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1689 * simply set to 0 since no data transfer occurs in this case.
1690 *
1691 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1692 * all the information required to perform the subsequent data transfer. In
1693 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1694 * controller performs the entire PING protocol, then starts the data
1695 * transfer.
1696 */
1697void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1698 struct dwc2_host_chan *chan)
1699{
1700 u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1701 u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1702 u32 hcchar;
1703 u32 hctsiz = 0;
1704 u16 num_packets;
1705
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001706 if (dbg_hc(chan))
1707 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001708
1709 if (chan->do_ping) {
1710 if (hsotg->core_params->dma_enable <= 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001711 if (dbg_hc(chan))
1712 dev_vdbg(hsotg->dev, "ping, no DMA\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001713 dwc2_hc_do_ping(hsotg, chan);
1714 chan->xfer_started = 1;
1715 return;
1716 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001717 if (dbg_hc(chan))
1718 dev_vdbg(hsotg->dev, "ping, DMA\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001719 hctsiz |= TSIZ_DOPNG;
1720 }
1721 }
1722
1723 if (chan->do_split) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001724 if (dbg_hc(chan))
1725 dev_vdbg(hsotg->dev, "split\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001726 num_packets = 1;
1727
1728 if (chan->complete_split && !chan->ep_is_in)
1729 /*
1730 * For CSPLIT OUT Transfer, set the size to 0 so the
1731 * core doesn't expect any data written to the FIFO
1732 */
1733 chan->xfer_len = 0;
1734 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1735 chan->xfer_len = chan->max_packet;
1736 else if (!chan->ep_is_in && chan->xfer_len > 188)
1737 chan->xfer_len = 188;
1738
1739 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1740 TSIZ_XFERSIZE_MASK;
1741 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001742 if (dbg_hc(chan))
1743 dev_vdbg(hsotg->dev, "no split\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001744 /*
1745 * Ensure that the transfer length and packet count will fit
1746 * in the widths allocated for them in the HCTSIZn register
1747 */
1748 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1749 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1750 /*
1751 * Make sure the transfer size is no larger than one
1752 * (micro)frame's worth of data. (A check was done
1753 * when the periodic transfer was accepted to ensure
1754 * that a (micro)frame's worth of data can be
1755 * programmed into a channel.)
1756 */
1757 u32 max_periodic_len =
1758 chan->multi_count * chan->max_packet;
1759
1760 if (chan->xfer_len > max_periodic_len)
1761 chan->xfer_len = max_periodic_len;
1762 } else if (chan->xfer_len > max_hc_xfer_size) {
1763 /*
1764 * Make sure that xfer_len is a multiple of max packet
1765 * size
1766 */
1767 chan->xfer_len =
1768 max_hc_xfer_size - chan->max_packet + 1;
1769 }
1770
1771 if (chan->xfer_len > 0) {
1772 num_packets = (chan->xfer_len + chan->max_packet - 1) /
1773 chan->max_packet;
1774 if (num_packets > max_hc_pkt_count) {
1775 num_packets = max_hc_pkt_count;
1776 chan->xfer_len = num_packets * chan->max_packet;
1777 }
1778 } else {
1779 /* Need 1 packet for transfer length of 0 */
1780 num_packets = 1;
1781 }
1782
1783 if (chan->ep_is_in)
1784 /*
1785 * Always program an integral # of max packets for IN
1786 * transfers
1787 */
1788 chan->xfer_len = num_packets * chan->max_packet;
1789
1790 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1791 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1792 /*
1793 * Make sure that the multi_count field matches the
1794 * actual transfer length
1795 */
1796 chan->multi_count = num_packets;
1797
1798 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1799 dwc2_set_pid_isoc(chan);
1800
1801 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1802 TSIZ_XFERSIZE_MASK;
1803 }
1804
1805 chan->start_pkt_count = num_packets;
1806 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1807 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1808 TSIZ_SC_MC_PID_MASK;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001809 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001810 if (dbg_hc(chan)) {
1811 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1812 hctsiz, chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001813
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001814 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1815 chan->hc_num);
1816 dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001817 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1818 TSIZ_XFERSIZE_SHIFT);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001819 dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001820 (hctsiz & TSIZ_PKTCNT_MASK) >>
1821 TSIZ_PKTCNT_SHIFT);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001822 dev_vdbg(hsotg->dev, " Start PID: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001823 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1824 TSIZ_SC_MC_PID_SHIFT);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001825 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001826
1827 if (hsotg->core_params->dma_enable > 0) {
1828 dma_addr_t dma_addr;
1829
1830 if (chan->align_buf) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001831 if (dbg_hc(chan))
1832 dev_vdbg(hsotg->dev, "align_buf\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001833 dma_addr = chan->align_buf;
1834 } else {
1835 dma_addr = chan->xfer_dma;
1836 }
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001837 dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001838 if (dbg_hc(chan))
1839 dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1840 (unsigned long)dma_addr, chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001841 }
1842
1843 /* Start the split */
1844 if (chan->do_split) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001845 u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001846
1847 hcsplt |= HCSPLT_SPLTENA;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001848 dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001849 }
1850
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001851 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001852 hcchar &= ~HCCHAR_MULTICNT_MASK;
1853 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1854 HCCHAR_MULTICNT_MASK;
1855 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1856
1857 if (hcchar & HCCHAR_CHDIS)
1858 dev_warn(hsotg->dev,
1859 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1860 __func__, chan->hc_num, hcchar);
1861
1862 /* Set host channel enable after all other setup is complete */
1863 hcchar |= HCCHAR_CHENA;
1864 hcchar &= ~HCCHAR_CHDIS;
1865
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001866 if (dbg_hc(chan))
1867 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001868 (hcchar & HCCHAR_MULTICNT_MASK) >>
1869 HCCHAR_MULTICNT_SHIFT);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001870
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001871 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001872 if (dbg_hc(chan))
1873 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1874 chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001875
1876 chan->xfer_started = 1;
1877 chan->requests++;
1878
1879 if (hsotg->core_params->dma_enable <= 0 &&
1880 !chan->ep_is_in && chan->xfer_len > 0)
1881 /* Load OUT packet into the appropriate Tx FIFO */
1882 dwc2_hc_write_packet(hsotg, chan);
1883}
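/*
 * Worked example for the HCTSIZ programming above (informational): a
 * non-split bulk IN of 3000 bytes with a 512-byte max packet gives
 *
 *	num_packets    = (3000 + 512 - 1) / 512 = 6
 *	chan->xfer_len = 6 * 512 = 3072	(IN lengths are rounded up to
 *					 whole packets)
 *	HCTSIZ: XferSize = 3072, PktCnt = 6, Pid = chan->data_pid_start
 *
 * In Slave mode the first OUT packet is then pushed to the FIFO by
 * dwc2_hc_write_packet(); in DMA mode the core fetches the data itself
 * from the address programmed into HCDMA.
 */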
1884
1885/**
1886 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1887 * host channel and starts the transfer in Descriptor DMA mode
1888 *
1889 * @hsotg: Programming view of DWC_otg controller
1890 * @chan: Information needed to initialize the host channel
1891 *
1892 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1893 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1894 * with micro-frame bitmap.
1895 *
1896 * Initializes HCDMA register with descriptor list address and CTD value then
1897 * starts the transfer via enabling the channel.
1898 */
1899void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1900 struct dwc2_host_chan *chan)
1901{
1902 u32 hcchar;
1903 u32 hc_dma;
1904 u32 hctsiz = 0;
1905
1906 if (chan->do_ping)
1907 hctsiz |= TSIZ_DOPNG;
1908
1909 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1910 dwc2_set_pid_isoc(chan);
1911
1912 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1913 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1914 TSIZ_SC_MC_PID_MASK;
1915
1916 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1917 hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1918
1919 /* Non-zero only for high-speed interrupt endpoints */
1920 hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1921
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001922 if (dbg_hc(chan)) {
1923 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1924 chan->hc_num);
1925 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1926 chan->data_pid_start);
1927 dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
1928 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001929
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001930 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001931
1932 hc_dma = (u32)chan->desc_list_addr & HCDMA_DMA_ADDR_MASK;
1933
1934 /* Always start from first descriptor */
1935 hc_dma &= ~HCDMA_CTD_MASK;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001936 dwc2_writel(hc_dma, hsotg->regs + HCDMA(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001937 if (dbg_hc(chan))
1938 dev_vdbg(hsotg->dev, "Wrote %08x to HCDMA(%d)\n",
1939 hc_dma, chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001940
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001941 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001942 hcchar &= ~HCCHAR_MULTICNT_MASK;
1943 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1944 HCCHAR_MULTICNT_MASK;
1945
1946 if (hcchar & HCCHAR_CHDIS)
1947 dev_warn(hsotg->dev,
1948 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1949 __func__, chan->hc_num, hcchar);
1950
1951 /* Set host channel enable after all other setup is complete */
1952 hcchar |= HCCHAR_CHENA;
1953 hcchar &= ~HCCHAR_CHDIS;
1954
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001955 if (dbg_hc(chan))
1956 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001957 (hcchar & HCCHAR_MULTICNT_MASK) >>
1958 HCCHAR_MULTICNT_SHIFT);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001959
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001960 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001961 if (dbg_hc(chan))
1962 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1963 chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001964
1965 chan->xfer_started = 1;
1966 chan->requests++;
1967}
1968
1969/**
1970 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
1971 * a previous call to dwc2_hc_start_transfer()
1972 *
1973 * @hsotg: Programming view of DWC_otg controller
1974 * @chan: Information needed to initialize the host channel
1975 *
1976 * The caller must ensure there is sufficient space in the request queue and Tx
1977 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
1978 * the controller acts autonomously to complete transfers programmed to a host
1979 * channel.
1980 *
1981 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1982 * if there is any data remaining to be queued. For an IN transfer, another
1983 * data packet is always requested. For the SETUP phase of a control transfer,
1984 * this function does nothing.
1985 *
1986 * Return: 1 if a new request is queued, 0 if no more requests are required
1987 * for this transfer
1988 */
1989int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
1990 struct dwc2_host_chan *chan)
1991{
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001992 if (dbg_hc(chan))
1993 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1994 chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001995
1996 if (chan->do_split)
1997 /* SPLITs always queue just once per channel */
1998 return 0;
1999
2000 if (chan->data_pid_start == DWC2_HC_PID_SETUP)
2001 /* SETUPs are queued only once since they can't be NAK'd */
2002 return 0;
2003
2004 if (chan->ep_is_in) {
2005 /*
2006 * Always queue another request for other IN transfers. If
2007 * back-to-back INs are issued and NAKs are received for both,
2008 * the driver may still be processing the first NAK when the
2009 * second NAK is received. When the interrupt handler clears
2010 * the NAK interrupt for the first NAK, the second NAK will
2011 * not be seen. So we can't depend on the NAK interrupt
2012 * handler to requeue a NAK'd request. Instead, IN requests
2013 * are issued each time this function is called. When the
2014 * transfer completes, the extra requests for the channel will
2015 * be flushed.
2016 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002017 u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002018
2019 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
2020 hcchar |= HCCHAR_CHENA;
2021 hcchar &= ~HCCHAR_CHDIS;
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02002022 if (dbg_hc(chan))
2023 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
2024 hcchar);
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002025 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002026 chan->requests++;
2027 return 1;
2028 }
2029
2030 /* OUT transfers */
2031
2032 if (chan->xfer_count < chan->xfer_len) {
2033 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
2034 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002035 u32 hcchar = dwc2_readl(hsotg->regs +
2036 HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002037
2038 dwc2_hc_set_even_odd_frame(hsotg, chan,
2039 &hcchar);
2040 }
2041
2042 /* Load OUT packet into the appropriate Tx FIFO */
2043 dwc2_hc_write_packet(hsotg, chan);
2044 chan->requests++;
2045 return 1;
2046 }
2047
2048 return 0;
2049}
2050
2051/**
2052 * dwc2_hc_do_ping() - Starts a PING transfer
2053 *
2054 * @hsotg: Programming view of DWC_otg controller
2055 * @chan: Information needed to initialize the host channel
2056 *
2057 * This function should only be called in Slave mode. The Do Ping bit is set in
2058 * the HCTSIZ register, then the channel is enabled.
2059 */
2060void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
2061{
2062 u32 hcchar;
2063 u32 hctsiz;
2064
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02002065 if (dbg_hc(chan))
2066 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
2067 chan->hc_num);
2068
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002069
2070 hctsiz = TSIZ_DOPNG;
2071 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002072 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002073
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002074 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002075 hcchar |= HCCHAR_CHENA;
2076 hcchar &= ~HCCHAR_CHDIS;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002077 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002078}
2079
2080/**
2081 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
2082 * the HFIR register according to PHY type and speed
2083 *
2084 * @hsotg: Programming view of DWC_otg controller
2085 *
2086 * NOTE: The caller can modify the value of the HFIR register only after the
2087 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
2088 * has been set
2089 */
2090u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
2091{
2092 u32 usbcfg;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002093 u32 hprt0;
2094 int clock = 60; /* default value */
2095
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002096 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
2097 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002098
2099 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
2100 !(usbcfg & GUSBCFG_PHYIF16))
2101 clock = 60;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002102 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002103 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
2104 clock = 48;
2105 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2106 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2107 clock = 30;
2108 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2109 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
2110 clock = 60;
2111 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2112 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2113 clock = 48;
2114 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002115 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002116 clock = 48;
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02002117 if ((usbcfg & GUSBCFG_PHYSEL) &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002118 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002119 clock = 48;
2120
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02002121 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002122 /* High speed case */
2123 return 125 * clock;
2124 else
2125 /* FS/LS case */
2126 return 1000 * clock;
2127}
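/*
 * Example of the calculation above (informational): a UTMI+ 16-bit PHY
 * without the low-power clock runs at 30 MHz, so at high speed the 125 us
 * microframe gives HFIR = 125 * 30 = 3750 PHY clocks. A dedicated 48 MHz
 * full-speed PHY at FS/LS gives HFIR = 1000 * 48 = 48000 PHY clocks for
 * the 1 ms frame.
 */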
2128
2129/**
2130 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
2131 * buffer
2132 *
2133 * @hsotg: Programming view of DWC_otg controller
2134 * @dest: Destination buffer for the packet
2135 * @bytes: Number of bytes to copy to the destination
2136 */
2137void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
2138{
2139 u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
2140 u32 *data_buf = (u32 *)dest;
2141 int word_count = (bytes + 3) / 4;
2142 int i;
2143
2144 /*
2145 * Todo: Account for the case where dest is not dword aligned. This
2146 * requires reading data from the FIFO into a u32 temp buffer, then
2147 * moving it into the data buffer.
2148 */
2149
2150 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
2151
2152 for (i = 0; i < word_count; i++, data_buf++)
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002153 *data_buf = dwc2_readl(fifo);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002154}
2155
2156/**
2157 * dwc2_dump_host_registers() - Prints the host registers
2158 *
2159 * @hsotg: Programming view of DWC_otg controller
2160 *
2161 * NOTE: This function will be removed once the peripheral controller code
2162 * is integrated and the driver is stable
2163 */
2164void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
2165{
2166#ifdef DEBUG
2167 u32 __iomem *addr;
2168 int i;
2169
2170 dev_dbg(hsotg->dev, "Host Global Registers\n");
2171 addr = hsotg->regs + HCFG;
2172 dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002173 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002174 addr = hsotg->regs + HFIR;
2175 dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002176 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002177 addr = hsotg->regs + HFNUM;
2178 dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002179 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002180 addr = hsotg->regs + HPTXSTS;
2181 dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002182 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002183 addr = hsotg->regs + HAINT;
2184 dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002185 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002186 addr = hsotg->regs + HAINTMSK;
2187 dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002188 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002189 if (hsotg->core_params->dma_desc_enable > 0) {
2190 addr = hsotg->regs + HFLBADDR;
2191 dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002192 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002193 }
2194
2195 addr = hsotg->regs + HPRT0;
2196 dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002197 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002198
2199 for (i = 0; i < hsotg->core_params->host_channels; i++) {
2200 dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
2201 addr = hsotg->regs + HCCHAR(i);
2202 dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002203 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002204 addr = hsotg->regs + HCSPLT(i);
2205 dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002206 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002207 addr = hsotg->regs + HCINT(i);
2208 dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002209 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002210 addr = hsotg->regs + HCINTMSK(i);
2211 dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002212 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002213 addr = hsotg->regs + HCTSIZ(i);
2214 dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002215 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002216 addr = hsotg->regs + HCDMA(i);
2217 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002218 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002219 if (hsotg->core_params->dma_desc_enable > 0) {
2220 addr = hsotg->regs + HCDMAB(i);
2221 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002222 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002223 }
2224 }
2225#endif
2226}
2227
2228/**
2229 * dwc2_dump_global_registers() - Prints the core global registers
2230 *
2231 * @hsotg: Programming view of DWC_otg controller
2232 *
2233 * NOTE: This function will be removed once the peripheral controller code
2234 * is integrated and the driver is stable
2235 */
2236void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
2237{
2238#ifdef DEBUG
2239 u32 __iomem *addr;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002240
2241 dev_dbg(hsotg->dev, "Core Global Registers\n");
2242 addr = hsotg->regs + GOTGCTL;
2243 dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002244 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002245 addr = hsotg->regs + GOTGINT;
2246 dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002247 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002248 addr = hsotg->regs + GAHBCFG;
2249 dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002250 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002251 addr = hsotg->regs + GUSBCFG;
2252 dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002253 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002254 addr = hsotg->regs + GRSTCTL;
2255 dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002256 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002257 addr = hsotg->regs + GINTSTS;
2258 dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002259 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002260 addr = hsotg->regs + GINTMSK;
2261 dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002262 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002263 addr = hsotg->regs + GRXSTSR;
2264 dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002265 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002266 addr = hsotg->regs + GRXFSIZ;
2267 dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002268 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002269 addr = hsotg->regs + GNPTXFSIZ;
2270 dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002271 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002272 addr = hsotg->regs + GNPTXSTS;
2273 dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002274 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002275 addr = hsotg->regs + GI2CCTL;
2276 dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002277 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002278 addr = hsotg->regs + GPVNDCTL;
2279 dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002280 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002281 addr = hsotg->regs + GGPIO;
2282 dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002283 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002284 addr = hsotg->regs + GUID;
2285 dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002286 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002287 addr = hsotg->regs + GSNPSID;
2288 dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002289 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002290 addr = hsotg->regs + GHWCFG1;
2291 dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002292 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002293 addr = hsotg->regs + GHWCFG2;
2294 dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002295 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002296 addr = hsotg->regs + GHWCFG3;
2297 dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002298 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002299 addr = hsotg->regs + GHWCFG4;
2300 dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002301 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002302 addr = hsotg->regs + GLPMCFG;
2303 dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002304 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002305 addr = hsotg->regs + GPWRDN;
2306 dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002307 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002308 addr = hsotg->regs + GDFIFOCFG;
2309 dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002310 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002311 addr = hsotg->regs + HPTXFSIZ;
2312 dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002313 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002314
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002315 addr = hsotg->regs + PCGCTL;
2316 dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002317 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002318#endif
2319}
2320
2321/**
2322 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
2323 *
2324 * @hsotg: Programming view of DWC_otg controller
2325 * @num: Tx FIFO to flush
2326 */
2327void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
2328{
2329 u32 greset;
2330 int count = 0;
2331
2332 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
2333
2334 greset = GRSTCTL_TXFFLSH;
2335 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002336 dwc2_writel(greset, hsotg->regs + GRSTCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002337
2338 do {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002339 greset = dwc2_readl(hsotg->regs + GRSTCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002340 if (++count > 10000) {
2341 dev_warn(hsotg->dev,
2342 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
2343 __func__, greset,
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002344 dwc2_readl(hsotg->regs + GNPTXSTS));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002345 break;
2346 }
2347 udelay(1);
2348 } while (greset & GRSTCTL_TXFFLSH);
2349
2350 /* Wait for at least 3 PHY Clocks */
2351 udelay(1);
2352}
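/*
 * Usage note (per the controller's GRSTCTL.TxFNum encoding, an assumption
 * not shown in this file): @num selects which transmit FIFO is flushed,
 * 0 being the non-periodic Tx FIFO, and the special value 0x10 requests a
 * flush of all Tx FIFOs, which is what the core initialization path uses
 * after a soft reset:
 *
 *	dwc2_flush_tx_fifo(hsotg, 0x10);
 */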
2353
2354/**
2355 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
2356 *
2357 * @hsotg: Programming view of DWC_otg controller
2358 */
2359void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
2360{
2361 u32 greset;
2362 int count = 0;
2363
2364 dev_vdbg(hsotg->dev, "%s()\n", __func__);
2365
2366 greset = GRSTCTL_RXFFLSH;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002367 dwc2_writel(greset, hsotg->regs + GRSTCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002368
2369 do {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002370 greset = dwc2_readl(hsotg->regs + GRSTCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002371 if (++count > 10000) {
2372 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
2373 __func__, greset);
2374 break;
2375 }
2376 udelay(1);
2377 } while (greset & GRSTCTL_RXFFLSH);
2378
2379 /* Wait for at least 3 PHY Clocks */
2380 udelay(1);
2381}
2382
Paul Zimmerman498f0662013-11-22 16:43:47 -08002383#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002384
2385/* Parameter access functions */
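/*
 * Note on the dwc2_set_param_*() helpers that follow (informational): a
 * negative value is treated as "not configured" and silently replaced by a
 * default derived from the detected hardware, while an out-of-range value
 * >= 0 additionally logs an error before falling back to that default.
 * A caller might, for example, do:
 *
 *	dwc2_set_param_otg_cap(hsotg, -1);	(auto-select from hw_params)
 *	dwc2_set_param_dma_enable(hsotg, 1);	(explicit, validated request)
 */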
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002386void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002387{
2388 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002389
2390 switch (val) {
2391 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002392 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002393 valid = 0;
2394 break;
2395 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002396 switch (hsotg->hw_params.op_mode) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002397 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2398 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2399 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2400 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2401 break;
2402 default:
2403 valid = 0;
2404 break;
2405 }
2406 break;
2407 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
2408 /* always valid */
2409 break;
2410 default:
2411 valid = 0;
2412 break;
2413 }
2414
2415 if (!valid) {
2416 if (val >= 0)
2417 dev_err(hsotg->dev,
2418 "%d invalid for otg_cap parameter. Check HW configuration.\n",
2419 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002420 switch (hsotg->hw_params.op_mode) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002421 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2422 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
2423 break;
2424 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2425 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2426 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2427 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2428 break;
2429 default:
2430 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2431 break;
2432 }
2433 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002434 }
2435
2436 hsotg->core_params->otg_cap = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002437}
2438
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002439void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002440{
2441 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002442
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002443 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002444 valid = 0;
2445 if (val < 0)
2446 valid = 0;
2447
2448 if (!valid) {
2449 if (val >= 0)
2450 dev_err(hsotg->dev,
2451 "%d invalid for dma_enable parameter. Check HW configuration.\n",
2452 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002453 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002454 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002455 }
2456
2457 hsotg->core_params->dma_enable = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002458}
2459
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002460void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002461{
2462 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002463
2464 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002465 !hsotg->hw_params.dma_desc_enable))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002466 valid = 0;
2467 if (val < 0)
2468 valid = 0;
2469
2470 if (!valid) {
2471 if (val >= 0)
2472 dev_err(hsotg->dev,
2473 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2474 val);
2475 val = (hsotg->core_params->dma_enable > 0 &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002476 hsotg->hw_params.dma_desc_enable);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002477 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002478 }
2479
2480 hsotg->core_params->dma_desc_enable = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002481}
2482
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002483void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2484 int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002485{
Paul Zimmerman498f0662013-11-22 16:43:47 -08002486 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002487 if (val >= 0) {
2488 dev_err(hsotg->dev,
2489 "Wrong value for host_support_fs_low_power\n");
2490 dev_err(hsotg->dev,
2491 "host_support_fs_low_power must be 0 or 1\n");
2492 }
2493 val = 0;
2494 dev_dbg(hsotg->dev,
2495 "Setting host_support_fs_low_power to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002496 }
2497
2498 hsotg->core_params->host_support_fs_ls_low_power = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002499}
2500
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002501void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002502{
2503 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002504
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002505 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002506 valid = 0;
2507 if (val < 0)
2508 valid = 0;
2509
2510 if (!valid) {
2511 if (val >= 0)
2512 dev_err(hsotg->dev,
2513 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2514 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002515 val = hsotg->hw_params.enable_dynamic_fifo;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002516 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002517 }
2518
2519 hsotg->core_params->enable_dynamic_fifo = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002520}
2521
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002522void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002523{
2524 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002525
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002526 if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002527 valid = 0;
2528
2529 if (!valid) {
2530 if (val >= 0)
2531 dev_err(hsotg->dev,
2532 "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2533 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002534 val = hsotg->hw_params.host_rx_fifo_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002535 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002536 }
2537
2538 hsotg->core_params->host_rx_fifo_size = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002539}
2540
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002541void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002542{
2543 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002544
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002545 if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002546 valid = 0;
2547
2548 if (!valid) {
2549 if (val >= 0)
2550 dev_err(hsotg->dev,
2551 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2552 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002553 val = hsotg->hw_params.host_nperio_tx_fifo_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002554 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2555 val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002556 }
2557
2558 hsotg->core_params->host_nperio_tx_fifo_size = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002559}
2560
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002561void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002562{
2563 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002564
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002565 if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002566 valid = 0;
2567
2568 if (!valid) {
2569 if (val >= 0)
2570 dev_err(hsotg->dev,
2571 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2572 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002573 val = hsotg->hw_params.host_perio_tx_fifo_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002574 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2575 val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002576 }
2577
2578 hsotg->core_params->host_perio_tx_fifo_size = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002579}
2580
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002581void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002582{
2583 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002584
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002585 if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002586 valid = 0;
2587
2588 if (!valid) {
2589 if (val >= 0)
2590 dev_err(hsotg->dev,
2591 "%d invalid for max_transfer_size. Check HW configuration.\n",
2592 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002593 val = hsotg->hw_params.max_transfer_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002594 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002595 }
2596
2597 hsotg->core_params->max_transfer_size = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002598}
2599
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002600void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002601{
2602 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002603
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002604 if (val < 15 || val > hsotg->hw_params.max_packet_count)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002605 valid = 0;
2606
2607 if (!valid) {
2608 if (val >= 0)
2609 dev_err(hsotg->dev,
2610 "%d invalid for max_packet_count. Check HW configuration.\n",
2611 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002612 val = hsotg->hw_params.max_packet_count;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002613 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002614 }
2615
2616 hsotg->core_params->max_packet_count = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002617}
2618
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002619void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002620{
2621 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002622
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002623 if (val < 1 || val > hsotg->hw_params.host_channels)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002624 valid = 0;
2625
2626 if (!valid) {
2627 if (val >= 0)
2628 dev_err(hsotg->dev,
2629 "%d invalid for host_channels. Check HW configuration.\n",
2630 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002631 val = hsotg->hw_params.host_channels;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002632 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002633 }
2634
2635 hsotg->core_params->host_channels = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002636}
2637
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002638void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002639{
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002640 int valid = 0;
Luis Ortega Perez de Villar0464a3d2013-09-25 13:10:50 +02002641 u32 hs_phy_type, fs_phy_type;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002642
Paul Zimmerman498f0662013-11-22 16:43:47 -08002643 if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
2644 DWC2_PHY_TYPE_PARAM_ULPI)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002645 if (val >= 0) {
2646 dev_err(hsotg->dev, "Wrong value for phy_type\n");
2647 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2648 }
2649
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002650 valid = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002651 }
2652
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002653 hs_phy_type = hsotg->hw_params.hs_phy_type;
2654 fs_phy_type = hsotg->hw_params.fs_phy_type;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002655 if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2656 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2657 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2658 valid = 1;
2659 else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2660 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2661 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2662 valid = 1;
2663 else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2664 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2665 valid = 1;
2666
2667 if (!valid) {
2668 if (val >= 0)
2669 dev_err(hsotg->dev,
2670 "%d invalid for phy_type. Check HW configuration.\n",
2671 val);
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002672 val = DWC2_PHY_TYPE_PARAM_FS;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002673 if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2674 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2675 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2676 val = DWC2_PHY_TYPE_PARAM_UTMI;
2677 else
2678 val = DWC2_PHY_TYPE_PARAM_ULPI;
2679 }
2680 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002681 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002682
2683 hsotg->core_params->phy_type = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002684}
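/*
 * DWC2_OUT_OF_BOUNDS() used above is the inclusive range check from core.h;
 * a minimal sketch of what it presumably expands to, shown here only for
 * readers without the header at hand:
 *
 *	#define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))
 */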
2685
2686static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2687{
2688 return hsotg->core_params->phy_type;
2689}
2690
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002691void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002692{
2693 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002694
Paul Zimmerman498f0662013-11-22 16:43:47 -08002695 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002696 if (val >= 0) {
2697 dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2698 dev_err(hsotg->dev, "speed parameter must be 0 or 1\n");
2699 }
2700 valid = 0;
2701 }
2702
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002703 if (val == DWC2_SPEED_PARAM_HIGH &&
2704 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002705 valid = 0;
2706
2707 if (!valid) {
2708 if (val >= 0)
2709 dev_err(hsotg->dev,
2710 "%d invalid for speed parameter. Check HW configuration.\n",
2711 val);
2712 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002713 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002714 dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002715 }
2716
2717 hsotg->core_params->speed = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002718}
2719
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002720void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002721{
2722 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002723
Paul Zimmerman498f0662013-11-22 16:43:47 -08002724 if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2725 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002726 if (val >= 0) {
2727 dev_err(hsotg->dev,
2728 "Wrong value for host_ls_low_power_phy_clk parameter\n");
2729 dev_err(hsotg->dev,
2730 "host_ls_low_power_phy_clk must be 0 or 1\n");
2731 }
2732 valid = 0;
2733 }
2734
2735 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2736 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2737 valid = 0;
2738
2739 if (!valid) {
2740 if (val >= 0)
2741 dev_err(hsotg->dev,
2742 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2743 val);
2744 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2745 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2746 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2747 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2748 val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002749 }
2750
2751 hsotg->core_params->host_ls_low_power_phy_clk = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002752}
2753
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002754void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002755{
Paul Zimmerman498f0662013-11-22 16:43:47 -08002756 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002757 if (val >= 0) {
2758 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2759 dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
2760 }
2761 val = 0;
2762 dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002763 }
2764
2765 hsotg->core_params->phy_ulpi_ddr = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002766}
2767
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002768void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002769{
Paul Zimmerman498f0662013-11-22 16:43:47 -08002770 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002771 if (val >= 0) {
2772 dev_err(hsotg->dev,
2773 "Wrong value for phy_ulpi_ext_vbus\n");
2774 dev_err(hsotg->dev,
2775 "phy_ulpi_ext_vbus must be 0 or 1\n");
2776 }
2777 val = 0;
2778 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002779 }
2780
2781 hsotg->core_params->phy_ulpi_ext_vbus = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002782}
2783
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002784void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002785{
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002786 int valid = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002787
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002788 switch (hsotg->hw_params.utmi_phy_data_width) {
2789 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2790 valid = (val == 8);
2791 break;
2792 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2793 valid = (val == 16);
2794 break;
2795 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2796 valid = (val == 8 || val == 16);
2797 break;
2798 }
2799
2800 if (!valid) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002801 if (val >= 0) {
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002802 dev_err(hsotg->dev,
2803 "%d invalid for phy_utmi_width. Check HW configuration.\n",
2804 val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002805 }
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002806 val = (hsotg->hw_params.utmi_phy_data_width ==
2807 GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002808 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002809 }
2810
2811 hsotg->core_params->phy_utmi_width = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002812}
2813
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002814void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002815{
Paul Zimmerman498f0662013-11-22 16:43:47 -08002816 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002817 if (val >= 0) {
2818 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2819 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2820 }
2821 val = 0;
2822 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002823 }
2824
2825 hsotg->core_params->ulpi_fs_ls = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002826}
2827
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002828void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002829{
Paul Zimmerman498f0662013-11-22 16:43:47 -08002830 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002831 if (val >= 0) {
2832 dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2833 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2834 }
2835 val = 0;
2836 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002837 }
2838
2839 hsotg->core_params->ts_dline = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002840}
2841
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002842void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002843{
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002844 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002845
Paul Zimmerman498f0662013-11-22 16:43:47 -08002846 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002847 if (val >= 0) {
2848 dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2849 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2850 }
2851
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002852 valid = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002853 }
2854
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002855 if (val == 1 && !(hsotg->hw_params.i2c_enable))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002856 valid = 0;
2857
2858 if (!valid) {
2859 if (val >= 0)
2860 dev_err(hsotg->dev,
2861 "%d invalid for i2c_enable. Check HW configuration.\n",
2862 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002863 val = hsotg->hw_params.i2c_enable;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002864 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002865 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002866
2867 hsotg->core_params->i2c_enable = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002868}
2869
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002870void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002871{
2872 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002873
Paul Zimmerman498f0662013-11-22 16:43:47 -08002874 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002875 if (val >= 0) {
2876 dev_err(hsotg->dev,
2877 "Wrong value for en_multiple_tx_fifo,\n");
2878 dev_err(hsotg->dev,
2879 "en_multiple_tx_fifo must be 0 or 1\n");
2880 }
2881 valid = 0;
2882 }
2883
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002884 if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002885 valid = 0;
2886
2887 if (!valid) {
2888 if (val >= 0)
2889 dev_err(hsotg->dev,
2890 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2891 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002892 val = hsotg->hw_params.en_multiple_tx_fifo;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002893 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002894 }
2895
2896 hsotg->core_params->en_multiple_tx_fifo = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002897}
2898
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002899void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002900{
2901 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002902
Paul Zimmerman498f0662013-11-22 16:43:47 -08002903 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002904 if (val >= 0) {
2905 dev_err(hsotg->dev,
2906 "'%d' invalid for parameter reload_ctl\n", val);
2907 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2908 }
2909 valid = 0;
2910 }
2911
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002912 if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002913 valid = 0;
2914
2915 if (!valid) {
2916 if (val >= 0)
2917 dev_err(hsotg->dev,
2918 "%d invalid for parameter reload_ctl. Check HW configuration.\n",
2919 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002920 val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002921 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002922 }
2923
2924 hsotg->core_params->reload_ctl = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002925}
2926
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002927void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002928{
Paul Zimmerman4d3190e2013-07-16 12:22:12 -07002929 if (val != -1)
2930 hsotg->core_params->ahbcfg = val;
2931 else
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02002932 hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
Luis Ortega Perez de Villar0464a3d2013-09-25 13:10:50 +02002933 GAHBCFG_HBSTLEN_SHIFT;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002934}
2935
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002936void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002937{
Paul Zimmerman498f0662013-11-22 16:43:47 -08002938 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002939 if (val >= 0) {
2940 dev_err(hsotg->dev,
2941 "'%d' invalid for parameter otg_ver\n", val);
2942 dev_err(hsotg->dev,
2943 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
2944 }
2945 val = 0;
2946 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002947 }
2948
2949 hsotg->core_params->otg_ver = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002950}
2951
Wei Yongjun49cf10c2013-11-28 10:27:59 +08002952static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
Paul Zimmermane8576e62013-11-25 13:42:47 -08002953{
2954 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2955 if (val >= 0) {
2956 dev_err(hsotg->dev,
2957 "'%d' invalid for parameter uframe_sched\n",
2958 val);
2959 dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
2960 }
2961 val = 1;
2962 dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
2963 }
2964
2965 hsotg->core_params->uframe_sched = val;
2966}
2967
Gregory Herreroa6d249d2015-04-29 22:09:04 +02002968static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
2969 int val)
2970{
2971 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2972 if (val >= 0) {
2973 dev_err(hsotg->dev,
2974 "'%d' invalid for parameter external_id_pin_ctl\n",
2975 val);
2976 dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
2977 }
2978 val = 0;
2979 dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
2980 }
2981
2982 hsotg->core_params->external_id_pin_ctl = val;
2983}
2984
Gregory Herrero285046a2015-04-29 22:09:19 +02002985static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
2986 int val)
2987{
2988 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
2989 if (val >= 0) {
2990 dev_err(hsotg->dev,
2991 "'%d' invalid for parameter hibernation\n",
2992 val);
2993 dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
2994 }
2995 val = 0;
2996 dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
2997 }
2998
2999 hsotg->core_params->hibernation = val;
3000}
3001
Paul Zimmermane8576e62013-11-25 13:42:47 -08003002/*
3003 * This function is called during module initialization to pass module parameters
3004 * for the DWC_otg core.
3005 */
3006void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
3007 const struct dwc2_core_params *params)
3008{
3009 dev_dbg(hsotg->dev, "%s()\n", __func__);
3010
3011 dwc2_set_param_otg_cap(hsotg, params->otg_cap);
3012 dwc2_set_param_dma_enable(hsotg, params->dma_enable);
3013 dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
3014 dwc2_set_param_host_support_fs_ls_low_power(hsotg,
3015 params->host_support_fs_ls_low_power);
3016 dwc2_set_param_enable_dynamic_fifo(hsotg,
3017 params->enable_dynamic_fifo);
3018 dwc2_set_param_host_rx_fifo_size(hsotg,
3019 params->host_rx_fifo_size);
3020 dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
3021 params->host_nperio_tx_fifo_size);
3022 dwc2_set_param_host_perio_tx_fifo_size(hsotg,
3023 params->host_perio_tx_fifo_size);
3024 dwc2_set_param_max_transfer_size(hsotg,
3025 params->max_transfer_size);
3026 dwc2_set_param_max_packet_count(hsotg,
3027 params->max_packet_count);
3028 dwc2_set_param_host_channels(hsotg, params->host_channels);
3029 dwc2_set_param_phy_type(hsotg, params->phy_type);
3030 dwc2_set_param_speed(hsotg, params->speed);
3031 dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
3032 params->host_ls_low_power_phy_clk);
3033 dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
3034 dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
3035 params->phy_ulpi_ext_vbus);
3036 dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
3037 dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
3038 dwc2_set_param_ts_dline(hsotg, params->ts_dline);
3039 dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
3040 dwc2_set_param_en_multiple_tx_fifo(hsotg,
3041 params->en_multiple_tx_fifo);
3042 dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
3043 dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
3044 dwc2_set_param_otg_ver(hsotg, params->otg_ver);
3045 dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
Gregory Herreroa6d249d2015-04-29 22:09:04 +02003046 dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
Gregory Herrero285046a2015-04-29 22:09:19 +02003047 dwc2_set_param_hibernation(hsotg, params->hibernation);
Paul Zimmermane8576e62013-11-25 13:42:47 -08003048}
3049
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003050/**
3051 * dwc2_get_hwparams() - During device initialization, read various hardware
3052 * configuration registers and interpret the contents.
3053 */
3054int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
3055{
3056 struct dwc2_hw_params *hw = &hsotg->hw_params;
3057 unsigned width;
3058 u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
3059 u32 hptxfsiz, grxfsiz, gnptxfsiz;
3060 u32 gusbcfg;
3061
3062 /*
3063 * Attempt to ensure this device is really a DWC_otg Controller.
3064 * Read and verify the GSNPSID register contents. The value should be
3065 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
3066 * as in "OTG version 2.xx" or "OTG version 3.xx".
3067 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003068 hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003069 if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
3070 (hw->snpsid & 0xfffff000) != 0x4f543000) {
3071 dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
3072 hw->snpsid);
3073 return -ENODEV;
3074 }
3075
3076 dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
3077 hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
3078 hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
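	/*
	 * Worked example (hypothetical ID value): an snpsid of 0x4f54271a
	 * would be printed by the line above as Core Release 2.71a.
	 */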
3079
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003080 hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
3081 hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
3082 hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
3083 hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
3084 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003085
3086 dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
3087 dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
3088 dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
3089 dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003090 dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
3091
Doug Anderson2867c052014-08-07 12:48:11 -07003092 /* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003093 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003094 gusbcfg |= GUSBCFG_FORCEHOSTMODE;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003095 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003096 usleep_range(100000, 150000);
3097
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003098 gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
3099 hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
Doug Anderson2867c052014-08-07 12:48:11 -07003100 dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003101 dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003102 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003103 gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003104 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003105 usleep_range(100000, 150000);
3106
3107 /* hwcfg2 */
3108 hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
3109 GHWCFG2_OP_MODE_SHIFT;
3110 hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
3111 GHWCFG2_ARCHITECTURE_SHIFT;
3112 hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
3113 hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
3114 GHWCFG2_NUM_HOST_CHAN_SHIFT);
3115 hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
3116 GHWCFG2_HS_PHY_TYPE_SHIFT;
3117 hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
3118 GHWCFG2_FS_PHY_TYPE_SHIFT;
3119 hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
3120 GHWCFG2_NUM_DEV_EP_SHIFT;
3121 hw->nperio_tx_q_depth =
3122 (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
3123 GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
3124 hw->host_perio_tx_q_depth =
3125 (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
3126 GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
3127 hw->dev_token_q_depth =
3128 (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
3129 GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
3130
3131 /* hwcfg3 */
3132 width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
3133 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
3134 hw->max_transfer_size = (1 << (width + 11)) - 1;
Paul Zimmermane8f8c142014-09-16 13:47:26 -07003135 /*
3136 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
3137 * coherent buffers with this size, and if it's too large we can
3138 * exhaust the coherent DMA pool.
3139 */
3140 if (hw->max_transfer_size > 65535)
3141 hw->max_transfer_size = 65535;
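	/*
	 * Worked example (hypothetical width value): a transfer size counter
	 * width field of 8 would give (1 << (8 + 11)) - 1 = 524287 bytes,
	 * which the clipping above reduces to 65535.
	 */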
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003142 width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
3143 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
3144 hw->max_packet_count = (1 << (width + 4)) - 1;
3145 hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
3146 hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
3147 GHWCFG3_DFIFO_DEPTH_SHIFT;
3148
3149 /* hwcfg4 */
3150 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
3151 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
3152 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
3153 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
3154 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02003155 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
3156 GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003157
3158 /* fifo sizes */
3159 hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
3160 GRXFSIZ_DEPTH_SHIFT;
3161 hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
3162 FIFOSIZE_DEPTH_SHIFT;
3163 hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
3164 FIFOSIZE_DEPTH_SHIFT;
3165
3166 dev_dbg(hsotg->dev, "Detected values from hardware:\n");
3167 dev_dbg(hsotg->dev, " op_mode=%d\n",
3168 hw->op_mode);
3169 dev_dbg(hsotg->dev, " arch=%d\n",
3170 hw->arch);
3171 dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
3172 hw->dma_desc_enable);
3173 dev_dbg(hsotg->dev, " power_optimized=%d\n",
3174 hw->power_optimized);
3175 dev_dbg(hsotg->dev, " i2c_enable=%d\n",
3176 hw->i2c_enable);
3177 dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
3178 hw->hs_phy_type);
3179 dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
3180 hw->fs_phy_type);
Masanari Iida971bd8f2015-05-20 23:54:02 +09003181 dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02003182 hw->utmi_phy_data_width);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003183 dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
3184 hw->num_dev_ep);
3185 dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
3186 hw->num_dev_perio_in_ep);
3187 dev_dbg(hsotg->dev, " host_channels=%d\n",
3188 hw->host_channels);
3189 dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
3190 hw->max_transfer_size);
3191 dev_dbg(hsotg->dev, " max_packet_count=%d\n",
3192 hw->max_packet_count);
3193 dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
3194 hw->nperio_tx_q_depth);
3195 dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
3196 hw->host_perio_tx_q_depth);
3197 dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
3198 hw->dev_token_q_depth);
3199 dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
3200 hw->enable_dynamic_fifo);
3201 dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
3202 hw->en_multiple_tx_fifo);
3203 dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
3204 hw->total_fifo_size);
3205 dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n",
3206 hw->host_rx_fifo_size);
3207 dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
3208 hw->host_nperio_tx_fifo_size);
3209 dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
3210 hw->host_perio_tx_fifo_size);
3211 dev_dbg(hsotg->dev, "\n");
3212
3213 return 0;
3214}
Mian Yousaf Kaukabecb176c2015-04-29 22:09:05 +02003215
3216/*
3217 * Sets all parameters to the given value.
3218 *
3219 * Assumes that the dwc2_core_params struct contains only integers.
3220 */
3221void dwc2_set_all_params(struct dwc2_core_params *params, int value)
3222{
3223 int *p = (int *)params;
3224 size_t size = sizeof(*params) / sizeof(*p);
3225 int i;
3226
3227 for (i = 0; i < size; i++)
3228 p[i] = value;
3229}
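/*
 * A minimal sketch of how the helpers above are meant to be paired by a
 * platform/glue driver (hypothetical caller, not part of this file): start
 * from an all -1 ("auto") parameter set and let dwc2_set_parameters() replace
 * each field with either the caller's override or the detected default.
 *
 *	struct dwc2_core_params defaults;
 *
 *	dwc2_set_all_params(&defaults, -1);
 *	dwc2_set_parameters(hsotg, &defaults);
 */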
Mian Yousaf Kaukabecb176c2015-04-29 22:09:05 +02003230
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003231
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003232u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
3233{
Paul Zimmermanb66a3f02013-11-22 16:43:50 -08003234 return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003235}
3236
Paul Zimmerman057715f2013-11-22 16:43:51 -08003237bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003238{
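	/*
	 * An all-ones read of GSNPSID typically means the register bus is
	 * returning garbage (controller powered down or clock gated), so
	 * treat the controller as not safe to access.
	 */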
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003239 if (dwc2_readl(hsotg->regs + GSNPSID) == 0xffffffff)
Paul Zimmerman057715f2013-11-22 16:43:51 -08003240 return false;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003241 else
Paul Zimmerman057715f2013-11-22 16:43:51 -08003242 return true;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003243}
3244
3245/**
3246 * dwc2_enable_global_interrupts() - Enables the controller's Global
3247 * Interrupt in the AHB Config register
3248 *
3249 * @hsotg: Programming view of DWC_otg controller
3250 */
3251void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
3252{
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003253 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003254
3255 ahbcfg |= GAHBCFG_GLBL_INTR_EN;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003256 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003257}
3258
3259/**
3260 * dwc2_disable_global_interrupts() - Disables the controller's Global
3261 * Interrupt in the AHB Config register
3262 *
3263 * @hsotg: Programming view of DWC_otg controller
3264 */
3265void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
3266{
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003267 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003268
3269 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003270 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003271}
3272
3273MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
3274MODULE_AUTHOR("Synopsys, Inc.");
3275MODULE_LICENSE("Dual BSD/GPL");