/*
 * core.c - DesignWare HS OTG Controller common routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The Core code provides basic services for accessing and managing the
 * DWC_otg hardware. These services are used by both the Host Controller
 * Driver and the Peripheral Controller Driver.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_host_registers() - Backup controller host registers.
 * When suspending the USB bus, the registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup Host regs */
	hr = &hsotg->hr_backup;
	hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
	hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));

	hr->hprt0 = dwc2_read_hprt0(hsotg);
	hr->hfir = dwc2_readl(hsotg->regs + HFIR);
	hr->valid = true;

	return 0;
}

/**
 * dwc2_restore_host_registers() - Restore controller host registers.
 * When resuming the USB bus, the host registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hregs_backup *hr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore host regs */
	hr = &hsotg->hr_backup;
	if (!hr->valid) {
		dev_err(hsotg->dev, "%s: no host registers to restore\n",
			__func__);
		return -EINVAL;
	}
	hr->valid = false;

	dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
	dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);

	for (i = 0; i < hsotg->core_params->host_channels; ++i)
		dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));

	dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
	dwc2_writel(hr->hfir, hsotg->regs + HFIR);
	hsotg->frame_number = 0;

	return 0;
}
#else
static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif

#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
	IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
/**
 * dwc2_backup_device_registers() - Backup controller device registers.
 * When suspending the USB bus, the registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup dev regs */
	dr = &hsotg->dr_backup;

	dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
	dr->dctl = dwc2_readl(hsotg->regs + DCTL);
	dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
	dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
	dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Backup IN EPs */
		dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->diepctl[i] & DXEPCTL_DPID)
			dr->diepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->diepctl[i] |= DXEPCTL_SETD0PID;

		dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
		dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));

		/* Backup OUT EPs */
		dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->doepctl[i] & DXEPCTL_DPID)
			dr->doepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->doepctl[i] |= DXEPCTL_SETD0PID;

		dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
		dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
	}
	dr->valid = true;
	return 0;
}

/**
 * dwc2_restore_device_registers() - Restore controller device registers.
 * When resuming the USB bus, the device registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	u32 dctl;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore dev regs */
	dr = &hsotg->dr_backup;
	if (!dr->valid) {
		dev_err(hsotg->dev, "%s: no device registers to restore\n",
			__func__);
		return -EINVAL;
	}
	dr->valid = false;

	dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
	dwc2_writel(dr->dctl, hsotg->regs + DCTL);
	dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
	dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
	dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Restore IN EPs */
		dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
		dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
		dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));

		/* Restore OUT EPs */
		dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
		dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
		dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
	}

	/* Set the Power-On Programming done bit */
	dctl = dwc2_readl(hsotg->regs + DCTL);
	dctl |= DCTL_PWRONPRGDONE;
	dwc2_writel(dctl, hsotg->regs + DCTL);

	return 0;
}
#else
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }

static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
#endif

/**
 * dwc2_backup_global_registers() - Backup global controller registers.
 * When suspending the USB bus, the registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	/* Backup global regs */
	gr = &hsotg->gr_backup;

	gr->gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	gr->gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
	gr->gahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
	gr->gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	gr->grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
	gr->gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
	gr->hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
	gr->gdfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		gr->dtxfsiz[i] = dwc2_readl(hsotg->regs + DPTXFSIZN(i));

	gr->valid = true;
	return 0;
}

/**
 * dwc2_restore_global_registers() - Restore controller global registers.
 * When resuming the USB bus, the global registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_gregs_backup *gr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore global regs */
	gr = &hsotg->gr_backup;
	if (!gr->valid) {
		dev_err(hsotg->dev, "%s: no global registers to restore\n",
			__func__);
		return -EINVAL;
	}
	gr->valid = false;

	dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
	dwc2_writel(gr->gotgctl, hsotg->regs + GOTGCTL);
	dwc2_writel(gr->gintmsk, hsotg->regs + GINTMSK);
	dwc2_writel(gr->gusbcfg, hsotg->regs + GUSBCFG);
	dwc2_writel(gr->gahbcfg, hsotg->regs + GAHBCFG);
	dwc2_writel(gr->grxfsiz, hsotg->regs + GRXFSIZ);
	dwc2_writel(gr->gnptxfsiz, hsotg->regs + GNPTXFSIZ);
	dwc2_writel(gr->hptxfsiz, hsotg->regs + HPTXFSIZ);
	dwc2_writel(gr->gdfifocfg, hsotg->regs + GDFIFOCFG);
	for (i = 0; i < MAX_EPS_CHANNELS; i++)
		dwc2_writel(gr->dtxfsiz[i], hsotg->regs + DPTXFSIZN(i));

	return 0;
}

/**
 * dwc2_exit_hibernation() - Exit controller from Partial Power Down.
 *
 * @hsotg:   Programming view of the DWC_otg controller
 * @restore: If true, restore the controller registers saved on entry
 */
int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
{
	u32 pcgcctl;
	int ret = 0;

	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_STOPPCLK;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_PWRCLMP;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	udelay(100);
	if (restore) {
		ret = dwc2_restore_global_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to restore registers\n",
				__func__);
			return ret;
		}
		if (dwc2_is_host_mode(hsotg)) {
			ret = dwc2_restore_host_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore host registers\n",
					__func__);
				return ret;
			}
		} else {
			ret = dwc2_restore_device_registers(hsotg);
			if (ret) {
				dev_err(hsotg->dev, "%s: failed to restore device registers\n",
					__func__);
				return ret;
			}
		}
	}

	return ret;
}

/**
 * dwc2_enter_hibernation() - Put controller in Partial Power Down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
{
	u32 pcgcctl;
	int ret = 0;

	if (!hsotg->core_params->hibernation)
		return -ENOTSUPP;

	/* Backup all registers */
	ret = dwc2_backup_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
			__func__);
		return ret;
	}

	if (dwc2_is_host_mode(hsotg)) {
		ret = dwc2_backup_host_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup host registers\n",
				__func__);
			return ret;
		}
	} else {
		ret = dwc2_backup_device_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to backup device registers\n",
				__func__);
			return ret;
		}
	}

	/*
	 * Clear any pending interrupts since dwc2 will not be able to
	 * clear them after entering hibernation.
	 */
	dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);

	/* Put the controller in low power state */
	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);

	pcgcctl |= PCGCTL_PWRCLMP;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
	ndelay(20);

	pcgcctl |= PCGCTL_RSTPDWNMODULE;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);
	ndelay(20);

	pcgcctl |= PCGCTL_STOPPCLK;
	dwc2_writel(pcgcctl, hsotg->regs + PCGCTL);

	return ret;
}
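
/*
 * Illustrative usage sketch (not part of the driver): a platform
 * suspend/resume path would normally pair the two entry points above,
 * assuming core_params->hibernation is enabled; locking and error
 * handling are omitted here.
 *
 *	ret = dwc2_enter_hibernation(hsotg);      // back up regs, gate clocks
 *	// ...controller power may be removed while suspended...
 *	ret = dwc2_exit_hibernation(hsotg, true); // ungate and restore regs
 *
 * Calling dwc2_exit_hibernation() with restore == false only releases the
 * power clamp and clock gating without writing back the saved registers.
 */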

/**
 * dwc2_enable_common_interrupts() - Initializes the common interrupts,
 * used in both device and host modes
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	/* Clear any pending OTG Interrupts */
	dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);

	/* Clear any pending interrupts */
	dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);

	/* Enable the interrupts in the GINTMSK */
	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;

	if (hsotg->core_params->dma_enable <= 0)
		intmsk |= GINTSTS_RXFLVL;
	if (hsotg->core_params->external_id_pin_ctl <= 0)
		intmsk |= GINTSTS_CONIDSTSCHNG;

	intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
		  GINTSTS_SESSREQINT;

	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * Initializes the FSLSPClkSel field of the HCFG register depending on the
 * PHY type
 */
static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, val;

	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	     hsotg->core_params->ulpi_fs_ls > 0) ||
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* Full speed PHY */
		val = HCFG_FSLSPCLKSEL_48_MHZ;
	} else {
		/* High speed PHY running at full speed or high speed */
		val = HCFG_FSLSPCLKSEL_30_60_MHZ;
	}

	dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
	hcfg = dwc2_readl(hsotg->regs + HCFG);
	hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
	hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
	dwc2_writel(hcfg, hsotg->regs + HCFG);
}

/*
 * Do a soft reset of the core. Be careful with this because it
 * resets all the internal state machines of the core.
 */
int dwc2_core_reset(struct dwc2_hsotg *hsotg)
{
	u32 greset;
	int count = 0;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Core Soft Reset */
	greset = dwc2_readl(hsotg->regs + GRSTCTL);
	greset |= GRSTCTL_CSFTRST;
	dwc2_writel(greset, hsotg->regs + GRSTCTL);
	do {
		udelay(1);
		greset = dwc2_readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! Soft Reset GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (greset & GRSTCTL_CSFTRST);

	/* Wait for AHB master IDLE state */
	count = 0;
	do {
		udelay(1);
		greset = dwc2_readl(hsotg->regs + GRSTCTL);
		if (++count > 50) {
			dev_warn(hsotg->dev,
				 "%s() HANG! AHB Idle GRSTCTL=%0x\n",
				 __func__, greset);
			return -EBUSY;
		}
	} while (!(greset & GRSTCTL_AHBIDLE));

	return 0;
}

/*
 * Do a soft reset of the core. Be careful with this because it
 * resets all the internal state machines of the core.
 *
 * Additionally this will apply force mode as per the hsotg->dr_mode
 * parameter.
 */
int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg)
{
	int retval;
	u32 gusbcfg;

	retval = dwc2_core_reset(hsotg);
	if (retval)
		return retval;

	if (hsotg->dr_mode == USB_DR_MODE_HOST) {
		gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
		gusbcfg |= GUSBCFG_FORCEHOSTMODE;
		dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
	} else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
		gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
		gusbcfg |= GUSBCFG_FORCEDEVMODE;
		dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
	} else if (hsotg->dr_mode == USB_DR_MODE_OTG) {
		gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
		gusbcfg &= ~GUSBCFG_FORCEDEVMODE;
		dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
	}

	/*
	 * NOTE: This long sleep is _very_ important, otherwise the core will
	 * not stay in host mode after a connector ID change!
	 */
	usleep_range(150000, 160000);

	return 0;
}
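
/*
 * Illustrative only: the force-mode programming above selects which role
 * the core comes up in after the reset, and the long usleep_range() gives
 * the mode change time to settle. A caller could sanity-check the result
 * along these lines (sketch, not part of the driver):
 *
 *	dwc2_core_reset_and_force_dr_mode(hsotg);
 *	if (hsotg->dr_mode == USB_DR_MODE_HOST && !dwc2_is_host_mode(hsotg))
 *		dev_warn(hsotg->dev, "core did not come up in host mode\n");
 */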

static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, i2cctl;
	int retval = 0;

	/*
	 * core_init() is now called on every switch so only call the
	 * following for the first time through
	 */
	if (select_phy) {
		dev_dbg(hsotg->dev, "FS PHY selected\n");

		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		if (!(usbcfg & GUSBCFG_PHYSEL)) {
			usbcfg |= GUSBCFG_PHYSEL;
			dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

			/* Reset after a PHY select */
			retval = dwc2_core_reset_and_force_dr_mode(hsotg);

			if (retval) {
				dev_err(hsotg->dev,
					"%s: Reset failed, aborting", __func__);
				return retval;
			}
		}
	}

	/*
	 * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
	 * do this on HNP Dev/Host mode switches (done in dev_init and
	 * host_init).
	 */
	if (dwc2_is_host_mode(hsotg))
		dwc2_init_fs_ls_pclk_sel(hsotg);

	if (hsotg->core_params->i2c_enable > 0) {
		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");

		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Program GI2CCTL.I2CEn */
		i2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
		i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
		i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
		i2cctl &= ~GI2CCTL_I2CEN;
		dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
		i2cctl |= GI2CCTL_I2CEN;
		dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
	}

	return retval;
}

static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg, usbcfg_old;
	int retval = 0;

	if (!select_phy)
		return 0;

	usbcfg = usbcfg_old = dwc2_readl(hsotg->regs + GUSBCFG);

	/*
	 * HS PHY parameters. These parameters are preserved during soft reset
	 * so only program the first time. Do a soft reset immediately after
	 * setting phyif.
	 */
	switch (hsotg->core_params->phy_type) {
	case DWC2_PHY_TYPE_PARAM_ULPI:
		/* ULPI interface */
		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
		if (hsotg->core_params->phy_ulpi_ddr > 0)
			usbcfg |= GUSBCFG_DDRSEL;
		break;
	case DWC2_PHY_TYPE_PARAM_UTMI:
		/* UTMI+ interface */
		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
		if (hsotg->core_params->phy_utmi_width == 16)
			usbcfg |= GUSBCFG_PHYIF16;
		break;
	default:
		dev_err(hsotg->dev, "FS PHY selected at HS!\n");
		break;
	}

	if (usbcfg != usbcfg_old) {
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

		/* Reset after setting the PHY parameters */
		retval = dwc2_core_reset_and_force_dr_mode(hsotg);
		if (retval) {
			dev_err(hsotg->dev,
				"%s: Reset failed, aborting", __func__);
			return retval;
		}
	}

	return retval;
}

static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
	u32 usbcfg;
	int retval = 0;

	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
		/* If FS mode with FS PHY */
		retval = dwc2_fs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	} else {
		/* High speed PHY */
		retval = dwc2_hs_phy_init(hsotg, select_phy);
		if (retval)
			return retval;
	}

	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
	    hsotg->core_params->ulpi_fs_ls > 0) {
		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg |= GUSBCFG_ULPI_FS_LS;
		usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
	} else {
		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
		usbcfg &= ~GUSBCFG_ULPI_FS_LS;
		usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
		dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
	}

	return retval;
}

static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);

	switch (hsotg->hw_params.arch) {
	case GHWCFG2_EXT_DMA_ARCH:
		dev_err(hsotg->dev, "External DMA Mode not supported\n");
		return -EINVAL;

	case GHWCFG2_INT_DMA_ARCH:
		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
		if (hsotg->core_params->ahbcfg != -1) {
			ahbcfg &= GAHBCFG_CTRL_MASK;
			ahbcfg |= hsotg->core_params->ahbcfg &
				  ~GAHBCFG_CTRL_MASK;
		}
		break;

	case GHWCFG2_SLAVE_ONLY_ARCH:
	default:
		dev_dbg(hsotg->dev, "Slave Only Mode\n");
		break;
	}

	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
		hsotg->core_params->dma_enable,
		hsotg->core_params->dma_desc_enable);

	if (hsotg->core_params->dma_enable > 0) {
		if (hsotg->core_params->dma_desc_enable > 0)
			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
		else
			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
	} else {
		dev_dbg(hsotg->dev, "Using Slave mode\n");
		hsotg->core_params->dma_desc_enable = 0;
	}

	if (hsotg->core_params->dma_enable > 0)
		ahbcfg |= GAHBCFG_DMA_EN;

	dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);

	return 0;
}

static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
{
	u32 usbcfg;

	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
	usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);

	switch (hsotg->hw_params.op_mode) {
	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
		if (hsotg->core_params->otg_cap ==
				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_HNPCAP;
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
		if (hsotg->core_params->otg_cap !=
				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
			usbcfg |= GUSBCFG_SRPCAP;
		break;

	case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
	case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
	default:
		break;
	}

	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
}

/**
 * dwc2_core_init() - Initializes the DWC_otg controller registers and
 * prepares the core for device mode or host mode operation
 *
 * @hsotg:         Programming view of the DWC_otg controller
 * @initial_setup: If true then this is the first init for this instance.
 */
int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
{
	u32 usbcfg, otgctl;
	int retval;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);

	/* Set ULPI External VBUS bit if needed */
	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
	if (hsotg->core_params->phy_ulpi_ext_vbus ==
				DWC2_PHY_ULPI_EXTERNAL_VBUS)
		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;

	/* Set external TS Dline pulsing bit if needed */
	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
	if (hsotg->core_params->ts_dline > 0)
		usbcfg |= GUSBCFG_TERMSELDLPULSE;

	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);

	/*
	 * Reset the Controller
	 *
	 * We only need to reset the controller if this is a re-init.
	 * For the first init we know for sure that earlier code reset us (it
	 * needed to in order to properly detect various parameters).
	 */
	if (!initial_setup) {
		retval = dwc2_core_reset_and_force_dr_mode(hsotg);
		if (retval) {
			dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
				__func__);
			return retval;
		}
	}

	/*
	 * This needs to happen in FS mode before any other programming occurs
	 */
	retval = dwc2_phy_init(hsotg, initial_setup);
	if (retval)
		return retval;

	/* Program the GAHBCFG Register */
	retval = dwc2_gahbcfg_init(hsotg);
	if (retval)
		return retval;

	/* Program the GUSBCFG register */
	dwc2_gusbcfg_init(hsotg);

	/* Program the GOTGCTL register */
	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_OTGVER;
	if (hsotg->core_params->otg_ver > 0)
		otgctl |= GOTGCTL_OTGVER;
	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);

	/* Clear the SRP success bit for FS-I2c */
	hsotg->srp_success = 0;

	/* Enable common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/*
	 * Do device or host initialization based on mode during PCD and
	 * HCD initialization
	 */
	if (dwc2_is_host_mode(hsotg)) {
		dev_dbg(hsotg->dev, "Host Mode\n");
		hsotg->op_state = OTG_STATE_A_HOST;
	} else {
		dev_dbg(hsotg->dev, "Device Mode\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
	}

	return 0;
}
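
/*
 * A rough call-order sketch for bringing the core up (illustrative only;
 * the actual probe and IRQ wiring live outside this file):
 *
 *	dwc2_core_reset(hsotg);        // done once by earlier setup code
 *	// ...hardware parameters detected...
 *	dwc2_core_init(hsotg, true);   // first init: skips the re-reset above
 *	dwc2_core_host_init(hsotg);    // if dwc2_is_host_mode(hsotg)
 *
 * Subsequent re-inits (for example on a role switch) pass
 * initial_setup == false, which takes the
 * dwc2_core_reset_and_force_dr_mode() path seen above.
 */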

/**
 * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk;

	dev_dbg(hsotg->dev, "%s()\n", __func__);

	/* Disable all interrupts */
	dwc2_writel(0, hsotg->regs + GINTMSK);
	dwc2_writel(0, hsotg->regs + HAINTMSK);

	/* Enable the common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/* Enable host mode interrupts without disturbing common interrupts */
	intmsk = dwc2_readl(hsotg->regs + GINTMSK);
	intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
}

/**
 * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
{
	u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK);

	/* Disable host mode interrupts without disturbing common interrupts */
	intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
		    GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
}

/*
 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
 * for systems that have a total FIFO depth that is smaller than the default
 * RX + TX fifo size.
 *
 * @hsotg: Programming view of DWC_otg controller
 */
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;

	total_fifo_size = hw->total_fifo_size;
	rxfsiz = params->host_rx_fifo_size;
	nptxfsiz = params->host_nperio_tx_fifo_size;
	ptxfsiz = params->host_perio_tx_fifo_size;

	/*
	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
	 * allocation with support for high bandwidth endpoints. Synopsys
	 * defines MPS(Max Packet size) for a periodic EP=1024, and for
	 * non-periodic as 512.
	 */
	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
		/*
		 * For Buffer DMA mode/Scatter Gather DMA mode
		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
		 * with n = number of host channel.
		 * 2 * ((1024/4) + 2) = 516
		 */
		rxfsiz = 516 + hw->host_channels;

		/*
		 * min non-periodic tx fifo depth
		 * 2 * (largest non-periodic USB packet used / 4)
		 * 2 * (512/4) = 256
		 */
		nptxfsiz = 256;

		/*
		 * min periodic tx fifo depth
		 * (largest packet size*MC)/4
		 * (1024 * 3)/4 = 768
		 */
		ptxfsiz = 768;

		params->host_rx_fifo_size = rxfsiz;
		params->host_nperio_tx_fifo_size = nptxfsiz;
		params->host_perio_tx_fifo_size = ptxfsiz;
	}

	/*
	 * If the summation of RX, NPTX and PTX fifo sizes is still
	 * bigger than the total_fifo_size, then we have a problem.
	 *
	 * We won't be able to allocate as many endpoints. Right now,
	 * we're just printing an error message, but ideally this FIFO
	 * allocation algorithm would be improved in the future.
	 *
	 * FIXME improve this FIFO allocation algorithm.
	 */
	if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
		dev_err(hsotg->dev, "invalid fifo sizes\n");
}
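
/*
 * Worked example of the fallback sizing above (numbers are hypothetical):
 * with hw->host_channels = 16 and configured FIFO sizes that exceed
 * hw->total_fifo_size, the function picks
 *
 *	rxfsiz   = 516 + 16 = 532
 *	nptxfsiz = 256
 *	ptxfsiz  = 768
 *
 * i.e. 1556 FIFO words in total. If even that exceeds total_fifo_size,
 * the dev_err() above fires and the computed sizes are kept as-is.
 */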

static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = hsotg->core_params;
	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;

	if (!params->enable_dynamic_fifo)
		return;

	dwc2_calculate_dynamic_fifo(hsotg);

	/* Rx FIFO */
	grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
	grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
	grxfsiz |= params->host_rx_fifo_size <<
		   GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
	dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ);
	dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + GRXFSIZ));

	/* Non-periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + GNPTXFSIZ));
	nptxfsiz = params->host_nperio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	nptxfsiz |= params->host_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
	dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + GNPTXFSIZ));

	/* Periodic Tx FIFO */
	dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + HPTXFSIZ));
	hptxfsiz = params->host_perio_tx_fifo_size <<
		   FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
	hptxfsiz |= (params->host_rx_fifo_size +
		     params->host_nperio_tx_fifo_size) <<
		    FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
	dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
		dwc2_readl(hsotg->regs + HPTXFSIZ));

	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
		/*
		 * Global DFIFOCFG calculation for Host mode -
		 * include RxFIFO, NPTXFIFO and HPTXFIFO
		 */
		dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
		dfifocfg |= (params->host_rx_fifo_size +
			     params->host_nperio_tx_fifo_size +
			     params->host_perio_tx_fifo_size) <<
			    GDFIFOCFG_EPINFOBASE_SHIFT &
			    GDFIFOCFG_EPINFOBASE_MASK;
		dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG);
	}
}
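
/*
 * The start addresses programmed above lay the shared FIFO RAM out
 * back-to-back (a sketch, offsets in FIFO words):
 *
 *	0 ........................... RX FIFO (host_rx_fifo_size)
 *	rx .......................... non-periodic TX FIFO
 *	rx + nperio_tx .............. periodic TX FIFO
 *	rx + nperio_tx + perio_tx ... endpoint info base (GDFIFOCFG)
 *
 * which matches the FIFOSIZE_STARTADDR and GDFIFOCFG_EPINFOBASE values
 * written in dwc2_config_fifos().
 */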

/**
 * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
 * Host mode
 *
 * @hsotg: Programming view of DWC_otg controller
 *
 * This function flushes the Tx and Rx FIFOs and flushes any entries in the
 * request queues. Host channels are reset to ensure that they are ready for
 * performing transfers.
 */
void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
{
	u32 hcfg, hfir, otgctl;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Restart the Phy Clock */
	dwc2_writel(0, hsotg->regs + PCGCTL);

	/* Initialize Host Configuration Register */
	dwc2_init_fs_ls_pclk_sel(hsotg);
	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
		hcfg = dwc2_readl(hsotg->regs + HCFG);
		hcfg |= HCFG_FSLSSUPP;
		dwc2_writel(hcfg, hsotg->regs + HCFG);
	}

	/*
	 * This bit allows dynamic reloading of the HFIR register during
	 * runtime. This bit needs to be programmed during initial configuration
	 * and its value must not be changed during runtime.
	 */
	if (hsotg->core_params->reload_ctl > 0) {
		hfir = dwc2_readl(hsotg->regs + HFIR);
		hfir |= HFIR_RLDCTRL;
		dwc2_writel(hfir, hsotg->regs + HFIR);
	}

	if (hsotg->core_params->dma_desc_enable > 0) {
		u32 op_mode = hsotg->hw_params.op_mode;

		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
		    !hsotg->hw_params.dma_desc_enable ||
		    op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
		    op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
			dev_err(hsotg->dev,
				"Hardware does not support descriptor DMA mode -\n");
			dev_err(hsotg->dev,
				"falling back to buffer DMA mode.\n");
			hsotg->core_params->dma_desc_enable = 0;
		} else {
			hcfg = dwc2_readl(hsotg->regs + HCFG);
			hcfg |= HCFG_DESCDMA;
			dwc2_writel(hcfg, hsotg->regs + HCFG);
		}
	}

	/* Configure data FIFO sizes */
	dwc2_config_fifos(hsotg);

	/* TODO - check this */
	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);

	/* Make sure the FIFOs are flushed */
	dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
	dwc2_flush_rx_fifo(hsotg);

	/* Clear Host Set HNP Enable in the OTG Control Register */
	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
	otgctl &= ~GOTGCTL_HSTSETHNPEN;
	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);

	if (hsotg->core_params->dma_desc_enable <= 0) {
		int num_channels, i;
		u32 hcchar;

		/* Flush out any leftover queued requests */
		num_channels = hsotg->core_params->host_channels;
		for (i = 0; i < num_channels; i++) {
			hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
			hcchar &= ~HCCHAR_CHENA;
			hcchar |= HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
		}

		/* Halt all channels to put them into a known state */
		for (i = 0; i < num_channels; i++) {
			int count = 0;

			hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
			hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
			hcchar &= ~HCCHAR_EPDIR;
			dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
			dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
				__func__, i);
			do {
				hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
				if (++count > 1000) {
					dev_err(hsotg->dev,
						"Unable to clear enable on channel %d\n",
						i);
					break;
				}
				udelay(1);
			} while (hcchar & HCCHAR_CHENA);
		}
	}

	/* Turn on the vbus power */
	dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
	if (hsotg->op_state == OTG_STATE_A_HOST) {
		u32 hprt0 = dwc2_read_hprt0(hsotg);

		dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
			!!(hprt0 & HPRT0_PWR));
		if (!(hprt0 & HPRT0_PWR)) {
			hprt0 |= HPRT0_PWR;
			dwc2_writel(hprt0, hsotg->regs + HPRT0);
		}
	}

	dwc2_enable_host_interrupts(hsotg);
}

static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dev_vdbg(hsotg->dev, "control/bulk\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_BBLERR;
		} else {
			hcintmsk |= HCINTMSK_NAK;
			hcintmsk |= HCINTMSK_NYET;
			if (chan->do_ping)
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->do_split) {
			hcintmsk |= HCINTMSK_NAK;
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}

		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "intr\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_NAK;
		hcintmsk |= HCINTMSK_STALL;
		hcintmsk |= HCINTMSK_XACTERR;
		hcintmsk |= HCINTMSK_DATATGLERR;
		hcintmsk |= HCINTMSK_FRMOVRUN;

		if (chan->ep_is_in)
			hcintmsk |= HCINTMSK_BBLERR;
		if (chan->error_state)
			hcintmsk |= HCINTMSK_ACK;
		if (chan->do_split) {
			if (chan->complete_split)
				hcintmsk |= HCINTMSK_NYET;
			else
				hcintmsk |= HCINTMSK_ACK;
		}
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (dbg_perio())
			dev_vdbg(hsotg->dev, "isoc\n");
		hcintmsk |= HCINTMSK_XFERCOMPL;
		hcintmsk |= HCINTMSK_FRMOVRUN;
		hcintmsk |= HCINTMSK_ACK;

		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_XACTERR;
			hcintmsk |= HCINTMSK_BBLERR;
		}
		break;
	default:
		dev_err(hsotg->dev, "## Unknown EP type ##\n");
		break;
	}

	dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan)
{
	u32 hcintmsk = HCINTMSK_CHHLTD;

	/*
	 * For Descriptor DMA mode core halts the channel on AHB error.
	 * Interrupt is not required.
	 */
	if (hsotg->core_params->dma_desc_enable <= 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcintmsk |= HCINTMSK_AHBERR;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA enabled\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			hcintmsk |= HCINTMSK_XFERCOMPL;
	}

	if (chan->error_state && !chan->do_split &&
	    chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "setting ACK\n");
		hcintmsk |= HCINTMSK_ACK;
		if (chan->ep_is_in) {
			hcintmsk |= HCINTMSK_DATATGLERR;
			if (chan->ep_type != USB_ENDPOINT_XFER_INT)
				hcintmsk |= HCINTMSK_NAK;
		}
	}

	dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
}

static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
				struct dwc2_host_chan *chan)
{
	u32 intmsk;

	if (hsotg->core_params->dma_enable > 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_hc_enable_dma_ints(hsotg, chan);
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA disabled\n");
		dwc2_hc_enable_slave_ints(hsotg, chan);
	}

	/* Enable the top level host channel interrupt */
	intmsk = dwc2_readl(hsotg->regs + HAINTMSK);
	intmsk |= 1 << chan->hc_num;
	dwc2_writel(intmsk, hsotg->regs + HAINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);

	/* Make sure host channel interrupts are enabled */
	intmsk = dwc2_readl(hsotg->regs + GINTMSK);
	intmsk |= GINTSTS_HCHINT;
	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
}

/**
 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
 * a specific endpoint
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan:  Information needed to initialize the host channel
 *
 * The HCCHARn register is set up with the characteristics specified in chan.
 * Host channel interrupts that may need to be serviced while this transfer is
 * in progress are enabled.
 */
void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u8 hc_num = chan->hc_num;
	u32 hcintmsk;
	u32 hcchar;
	u32 hcsplt = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Clear old interrupt conditions for this host channel */
	hcintmsk = 0xffffffff;
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num));

	/* Enable channel interrupts required for this transfer */
	dwc2_hc_enable_ints(hsotg, chan);

	/*
	 * Program the HCCHARn register with the endpoint characteristics for
	 * the current transfer
	 */
	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
	if (chan->ep_is_in)
		hcchar |= HCCHAR_EPDIR;
	if (chan->speed == USB_SPEED_LOW)
		hcchar |= HCCHAR_LSPDDEV;
	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
	dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
			 hc_num, hcchar);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
			 __func__, hc_num);
		dev_vdbg(hsotg->dev, "  Dev Addr: %d\n",
			 chan->dev_addr);
		dev_vdbg(hsotg->dev, "  Ep Num: %d\n",
			 chan->ep_num);
		dev_vdbg(hsotg->dev, "  Is In: %d\n",
			 chan->ep_is_in);
		dev_vdbg(hsotg->dev, "  Is Low Speed: %d\n",
			 chan->speed == USB_SPEED_LOW);
		dev_vdbg(hsotg->dev, "  Ep Type: %d\n",
			 chan->ep_type);
		dev_vdbg(hsotg->dev, "  Max Pkt: %d\n",
			 chan->max_packet);
	}

	/* Program the HCSPLT register for SPLITs */
	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev,
				 "Programming HC %d with split --> %s\n",
				 hc_num,
				 chan->complete_split ? "CSPLIT" : "SSPLIT");
		if (chan->complete_split)
			hcsplt |= HCSPLT_COMPSPLT;
		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
			  HCSPLT_XACTPOS_MASK;
		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
			  HCSPLT_HUBADDR_MASK;
		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
			  HCSPLT_PRTADDR_MASK;
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001393 if (dbg_hc(chan)) {
1394 dev_vdbg(hsotg->dev, " comp split %d\n",
1395 chan->complete_split);
1396 dev_vdbg(hsotg->dev, " xact pos %d\n",
1397 chan->xact_pos);
1398 dev_vdbg(hsotg->dev, " hub addr %d\n",
1399 chan->hub_addr);
1400 dev_vdbg(hsotg->dev, " hub port %d\n",
1401 chan->hub_port);
1402 dev_vdbg(hsotg->dev, " is_in %d\n",
1403 chan->ep_is_in);
1404 dev_vdbg(hsotg->dev, " Max Pkt %d\n",
Matthijs Kooijman57bb8ae2013-08-30 18:45:17 +02001405 chan->max_packet);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001406 dev_vdbg(hsotg->dev, " xferlen %d\n",
1407 chan->xfer_len);
1408 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001409 }
1410
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001411 dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001412}
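/*
 * Illustrative sketch only (the field names are the ones consumed above;
 * the call sequence is an assumption about typical host-side usage, not a
 * copy of the HCD code): a caller fills in the channel before starting a
 * transfer, roughly:
 *
 *	chan->dev_addr   = <device address>;
 *	chan->ep_num     = <endpoint number>;
 *	chan->ep_is_in   = <1 for IN, 0 for OUT>;
 *	chan->speed      = <USB_SPEED_LOW/FULL/HIGH>;
 *	chan->ep_type    = <USB_ENDPOINT_XFER_*>;
 *	chan->max_packet = <wMaxPacketSize>;
 *	dwc2_hc_init(hsotg, chan);
 *	dwc2_hc_start_transfer(hsotg, chan);
 */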
1413
1414/**
1415 * dwc2_hc_halt() - Attempts to halt a host channel
1416 *
1417 * @hsotg: Controller register interface
1418 * @chan: Host channel to halt
1419 * @halt_status: Reason for halting the channel
1420 *
1421 * This function should only be called in Slave mode or to abort a transfer in
1422 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
1423 * controller halts the channel when the transfer is complete or a condition
1424 * occurs that requires application intervention.
1425 *
1426 * In slave mode, checks for a free request queue entry, then sets the Channel
1427 * Enable and Channel Disable bits of the Host Channel Characteristics
1428 * register of the specified channel to initiate the halt. If there is no free
1429 * request queue entry, sets only the Channel Disable bit of the HCCHARn
1430 * register to flush requests for this channel. In the latter case, sets a
1431 * flag to indicate that the host channel needs to be halted when a request
1432 * queue slot is open.
1433 *
1434 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1435 * HCCHARn register. The controller ensures there is space in the request
1436 * queue before submitting the halt request.
1437 *
1438 * Some time may elapse before the core flushes any posted requests for this
1439 * host channel and halts. The Channel Halted interrupt handler completes the
1440 * deactivation of the host channel.
1441 */
1442void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
1443 enum dwc2_halt_status halt_status)
1444{
1445 u32 nptxsts, hptxsts, hcchar;
1446
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001447 if (dbg_hc(chan))
1448 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001449 if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
1450 dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
1451
1452 if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1453 halt_status == DWC2_HC_XFER_AHB_ERR) {
1454 /*
1455 * Disable all channel interrupts except Ch Halted. The QTD
1456 * and QH state associated with this transfer has been cleared
1457 * (in the case of URB_DEQUEUE), so the channel needs to be
1458 * shut down carefully to prevent crashes.
1459 */
1460 u32 hcintmsk = HCINTMSK_CHHLTD;
1461
1462 dev_vdbg(hsotg->dev, "dequeue/error\n");
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001463 dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001464
1465 /*
1466 * Make sure no other interrupts besides halt are currently
1467 * pending. Handling another interrupt could cause a crash due
1468 * to the QTD and QH state.
1469 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001470 dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001471
1472 /*
1473 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1474 * even if the channel was already halted for some other
1475 * reason
1476 */
1477 chan->halt_status = halt_status;
1478
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001479 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001480 if (!(hcchar & HCCHAR_CHENA)) {
1481 /*
1482 * The channel is either already halted or it hasn't
1483 * started yet. In DMA mode, the transfer may halt if
1484 * it finishes normally or a condition occurs that
1485 * requires driver intervention. Don't want to halt
1486 * the channel again. In either Slave or DMA mode,
1487 * it's possible that the transfer has been assigned
1488			 * to a channel, but not started yet when a URB is
1489 * dequeued. Don't want to halt a channel that hasn't
1490 * started yet.
1491 */
1492 return;
1493 }
1494 }
1495 if (chan->halt_pending) {
1496 /*
1497 * A halt has already been issued for this channel. This might
1498 * happen when a transfer is aborted by a higher level in
1499 * the stack.
1500 */
1501 dev_vdbg(hsotg->dev,
1502 "*** %s: Channel %d, chan->halt_pending already set ***\n",
1503 __func__, chan->hc_num);
1504 return;
1505 }
1506
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001507 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001508
1509 /* No need to set the bit in DDMA for disabling the channel */
1510 /* TODO check it everywhere channel is disabled */
1511 if (hsotg->core_params->dma_desc_enable <= 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001512 if (dbg_hc(chan))
1513 dev_vdbg(hsotg->dev, "desc DMA disabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001514 hcchar |= HCCHAR_CHENA;
1515 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001516 if (dbg_hc(chan))
1517 dev_dbg(hsotg->dev, "desc DMA enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001518 }
1519 hcchar |= HCCHAR_CHDIS;
1520
1521 if (hsotg->core_params->dma_enable <= 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001522 if (dbg_hc(chan))
1523 dev_vdbg(hsotg->dev, "DMA not enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001524 hcchar |= HCCHAR_CHENA;
1525
1526 /* Check for space in the request queue to issue the halt */
1527 if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1528 chan->ep_type == USB_ENDPOINT_XFER_BULK) {
1529 dev_vdbg(hsotg->dev, "control/bulk\n");
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001530 nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001531 if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
1532 dev_vdbg(hsotg->dev, "Disabling channel\n");
1533 hcchar &= ~HCCHAR_CHENA;
1534 }
1535 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001536 if (dbg_perio())
1537 dev_vdbg(hsotg->dev, "isoc/intr\n");
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001538 hptxsts = dwc2_readl(hsotg->regs + HPTXSTS);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001539 if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
1540 hsotg->queuing_high_bandwidth) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001541 if (dbg_perio())
1542 dev_vdbg(hsotg->dev, "Disabling channel\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001543 hcchar &= ~HCCHAR_CHENA;
1544 }
1545 }
1546 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001547 if (dbg_hc(chan))
1548 dev_vdbg(hsotg->dev, "DMA enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001549 }
1550
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001551 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001552 chan->halt_status = halt_status;
1553
1554 if (hcchar & HCCHAR_CHENA) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001555 if (dbg_hc(chan))
1556 dev_vdbg(hsotg->dev, "Channel enabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001557 chan->halt_pending = 1;
1558 chan->halt_on_queue = 0;
1559 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001560 if (dbg_hc(chan))
1561 dev_vdbg(hsotg->dev, "Channel disabled\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001562 chan->halt_on_queue = 1;
1563 }
1564
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001565 if (dbg_hc(chan)) {
1566 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1567 chan->hc_num);
1568 dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
1569 hcchar);
1570 dev_vdbg(hsotg->dev, " halt_pending: %d\n",
1571 chan->halt_pending);
1572 dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
1573 chan->halt_on_queue);
1574 dev_vdbg(hsotg->dev, " halt_status: %d\n",
1575 chan->halt_status);
1576 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001577}
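/*
 * Note on the register bits used above: buffer DMA and slave mode halt the
 * channel by setting HCCHAR.ChDis together with HCCHAR.ChEna, while
 * descriptor DMA only needs ChDis. Whether halt_pending or halt_on_queue
 * ends up set depends on whether ChEna was still set when HCCHAR was
 * written back.
 */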
1578
1579/**
1580 * dwc2_hc_cleanup() - Clears the transfer state for a host channel
1581 *
1582 * @hsotg: Programming view of DWC_otg controller
1583 * @chan: Identifies the host channel to clean up
1584 *
1585 * This function is normally called after a transfer is done and the host
1586 * channel is being released
1587 */
1588void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
1589{
1590 u32 hcintmsk;
1591
1592 chan->xfer_started = 0;
1593
1594 /*
1595 * Clear channel interrupt enables and any unhandled channel interrupt
1596 * conditions
1597 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001598 dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001599 hcintmsk = 0xffffffff;
1600 hcintmsk &= ~HCINTMSK_RESERVED14_31;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001601 dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001602}
1603
1604/**
1605 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
1606 * which frame a periodic transfer should occur
1607 *
1608 * @hsotg: Programming view of DWC_otg controller
1609 * @chan: Identifies the host channel to set up and its properties
1610 * @hcchar: Current value of the HCCHAR register for the specified host channel
1611 *
1612 * This function has no effect on non-periodic transfers
1613 */
1614static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
1615 struct dwc2_host_chan *chan, u32 *hcchar)
1616{
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001617 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1618 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001619 /* 1 if _next_ frame is odd, 0 if it's even */
Paul Zimmerman81a58952013-06-24 11:34:23 -07001620 if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001621 *hcchar |= HCCHAR_ODDFRM;
1622 }
1623}
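/*
 * Worked example for the helper above: if dwc2_hcd_get_frame_number()
 * returns 0x14 (even), the transfer will go out in frame 0x15, which is
 * odd, so HCCHAR_ODDFRM is set; for an odd current frame the bit is left
 * clear.
 */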
1624
1625static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
1626{
1627 /* Set up the initial PID for the transfer */
1628 if (chan->speed == USB_SPEED_HIGH) {
1629 if (chan->ep_is_in) {
1630 if (chan->multi_count == 1)
1631 chan->data_pid_start = DWC2_HC_PID_DATA0;
1632 else if (chan->multi_count == 2)
1633 chan->data_pid_start = DWC2_HC_PID_DATA1;
1634 else
1635 chan->data_pid_start = DWC2_HC_PID_DATA2;
1636 } else {
1637 if (chan->multi_count == 1)
1638 chan->data_pid_start = DWC2_HC_PID_DATA0;
1639 else
1640 chan->data_pid_start = DWC2_HC_PID_MDATA;
1641 }
1642 } else {
1643 chan->data_pid_start = DWC2_HC_PID_DATA0;
1644 }
1645}
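/*
 * Summary of the mapping above for high-speed isochronous transfers: IN
 * endpoints start with DATA0/DATA1/DATA2 for a multi_count of 1, 2, or 3
 * and above, OUT endpoints start with DATA0 for a single transaction per
 * microframe and MDATA otherwise; full- and low-speed transfers always
 * start with DATA0.
 */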
1646
1647/**
1648 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
1649 * the Host Channel
1650 *
1651 * @hsotg: Programming view of DWC_otg controller
1652 * @chan: Information needed to initialize the host channel
1653 *
1654 * This function should only be called in Slave mode. For a channel associated
1655 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
1656 * associated with a periodic EP, the periodic Tx FIFO is written.
1657 *
1658 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
1659 * the number of bytes written to the Tx FIFO.
1660 */
1661static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
1662 struct dwc2_host_chan *chan)
1663{
1664 u32 i;
1665 u32 remaining_count;
1666 u32 byte_count;
1667 u32 dword_count;
1668 u32 __iomem *data_fifo;
1669 u32 *data_buf = (u32 *)chan->xfer_buf;
1670
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001671 if (dbg_hc(chan))
1672 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001673
1674 data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
1675
1676 remaining_count = chan->xfer_len - chan->xfer_count;
1677 if (remaining_count > chan->max_packet)
1678 byte_count = chan->max_packet;
1679 else
1680 byte_count = remaining_count;
1681
1682 dword_count = (byte_count + 3) / 4;
1683
1684 if (((unsigned long)data_buf & 0x3) == 0) {
1685 /* xfer_buf is DWORD aligned */
1686 for (i = 0; i < dword_count; i++, data_buf++)
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001687 dwc2_writel(*data_buf, data_fifo);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001688 } else {
1689 /* xfer_buf is not DWORD aligned */
1690 for (i = 0; i < dword_count; i++, data_buf++) {
1691 u32 data = data_buf[0] | data_buf[1] << 8 |
1692 data_buf[2] << 16 | data_buf[3] << 24;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001693 dwc2_writel(data, data_fifo);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001694 }
1695 }
1696
1697 chan->xfer_count += byte_count;
1698 chan->xfer_buf += byte_count;
1699}
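/*
 * Note: each call loads at most one packet (max_packet bytes, rounded up
 * to whole DWORDs) into the FIFO; xfer_count and xfer_buf advance by the
 * byte count so that dwc2_hc_continue_transfer() can queue the next
 * packet from where this one left off.
 */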
1700
1701/**
1702 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
1703 * channel and starts the transfer
1704 *
1705 * @hsotg: Programming view of DWC_otg controller
1706 * @chan: Information needed to initialize the host channel. The xfer_len value
1707 * may be reduced to accommodate the max widths of the XferSize and
1708 * PktCnt fields in the HCTSIZn register. The multi_count value may be
1709 * changed to reflect the final xfer_len value.
1710 *
1711 * This function may be called in either Slave mode or DMA mode. In Slave mode,
1712 * the caller must ensure that there is sufficient space in the request queue
1713 * and Tx Data FIFO.
1714 *
1715 * For an OUT transfer in Slave mode, it loads a data packet into the
1716 * appropriate FIFO. If necessary, additional data packets are loaded in the
1717 * Host ISR.
1718 *
1719 * For an IN transfer in Slave mode, a data packet is requested. The data
1720 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1721 * additional data packets are requested in the Host ISR.
1722 *
1723 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1724 * register along with a packet count of 1 and the channel is enabled. This
1725 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1726 * simply set to 0 since no data transfer occurs in this case.
1727 *
1728 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1729 * all the information required to perform the subsequent data transfer. In
1730 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1731 * controller performs the entire PING protocol, then starts the data
1732 * transfer.
1733 */
1734void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
1735 struct dwc2_host_chan *chan)
1736{
1737 u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
1738 u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
1739 u32 hcchar;
1740 u32 hctsiz = 0;
1741 u16 num_packets;
Douglas Anderson69b76cd2015-11-11 10:33:52 -08001742 u32 ec_mc;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001743
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001744 if (dbg_hc(chan))
1745 dev_vdbg(hsotg->dev, "%s()\n", __func__);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001746
1747 if (chan->do_ping) {
1748 if (hsotg->core_params->dma_enable <= 0) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001749 if (dbg_hc(chan))
1750 dev_vdbg(hsotg->dev, "ping, no DMA\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001751 dwc2_hc_do_ping(hsotg, chan);
1752 chan->xfer_started = 1;
1753 return;
1754 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001755 if (dbg_hc(chan))
1756 dev_vdbg(hsotg->dev, "ping, DMA\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001757 hctsiz |= TSIZ_DOPNG;
1758 }
1759 }
1760
1761 if (chan->do_split) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001762 if (dbg_hc(chan))
1763 dev_vdbg(hsotg->dev, "split\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001764 num_packets = 1;
1765
1766 if (chan->complete_split && !chan->ep_is_in)
1767 /*
1768 * For CSPLIT OUT Transfer, set the size to 0 so the
1769 * core doesn't expect any data written to the FIFO
1770 */
1771 chan->xfer_len = 0;
1772 else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
1773 chan->xfer_len = chan->max_packet;
1774 else if (!chan->ep_is_in && chan->xfer_len > 188)
1775 chan->xfer_len = 188;
1776
1777 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1778 TSIZ_XFERSIZE_MASK;
Douglas Anderson69b76cd2015-11-11 10:33:52 -08001779
1780 /* For split set ec_mc for immediate retries */
1781 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1782 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1783 ec_mc = 3;
1784 else
1785 ec_mc = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001786 } else {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001787 if (dbg_hc(chan))
1788 dev_vdbg(hsotg->dev, "no split\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001789 /*
1790 * Ensure that the transfer length and packet count will fit
1791 * in the widths allocated for them in the HCTSIZn register
1792 */
1793 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1794 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1795 /*
1796 * Make sure the transfer size is no larger than one
1797 * (micro)frame's worth of data. (A check was done
1798 * when the periodic transfer was accepted to ensure
1799 * that a (micro)frame's worth of data can be
1800 * programmed into a channel.)
1801 */
1802 u32 max_periodic_len =
1803 chan->multi_count * chan->max_packet;
1804
1805 if (chan->xfer_len > max_periodic_len)
1806 chan->xfer_len = max_periodic_len;
1807 } else if (chan->xfer_len > max_hc_xfer_size) {
1808 /*
1809 * Make sure that xfer_len is a multiple of max packet
1810 * size
1811 */
1812 chan->xfer_len =
1813 max_hc_xfer_size - chan->max_packet + 1;
1814 }
1815
1816 if (chan->xfer_len > 0) {
1817 num_packets = (chan->xfer_len + chan->max_packet - 1) /
1818 chan->max_packet;
1819 if (num_packets > max_hc_pkt_count) {
1820 num_packets = max_hc_pkt_count;
1821 chan->xfer_len = num_packets * chan->max_packet;
1822 }
1823 } else {
1824 /* Need 1 packet for transfer length of 0 */
1825 num_packets = 1;
1826 }
1827
1828 if (chan->ep_is_in)
1829 /*
1830 * Always program an integral # of max packets for IN
1831 * transfers
1832 */
1833 chan->xfer_len = num_packets * chan->max_packet;
1834
1835 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1836 chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1837 /*
1838 * Make sure that the multi_count field matches the
1839 * actual transfer length
1840 */
1841 chan->multi_count = num_packets;
1842
1843 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1844 dwc2_set_pid_isoc(chan);
1845
1846 hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
1847 TSIZ_XFERSIZE_MASK;
Douglas Anderson69b76cd2015-11-11 10:33:52 -08001848
1849 /* The ec_mc gets the multi_count for non-split */
1850 ec_mc = chan->multi_count;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001851 }
1852
1853 chan->start_pkt_count = num_packets;
1854 hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
1855 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1856 TSIZ_SC_MC_PID_MASK;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001857 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001858 if (dbg_hc(chan)) {
1859 dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
1860 hctsiz, chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001861
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001862 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1863 chan->hc_num);
1864 dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001865 (hctsiz & TSIZ_XFERSIZE_MASK) >>
1866 TSIZ_XFERSIZE_SHIFT);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001867 dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001868 (hctsiz & TSIZ_PKTCNT_MASK) >>
1869 TSIZ_PKTCNT_SHIFT);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001870 dev_vdbg(hsotg->dev, " Start PID: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001871 (hctsiz & TSIZ_SC_MC_PID_MASK) >>
1872 TSIZ_SC_MC_PID_SHIFT);
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001873 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001874
1875 if (hsotg->core_params->dma_enable > 0) {
1876 dma_addr_t dma_addr;
1877
1878 if (chan->align_buf) {
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001879 if (dbg_hc(chan))
1880 dev_vdbg(hsotg->dev, "align_buf\n");
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001881 dma_addr = chan->align_buf;
1882 } else {
1883 dma_addr = chan->xfer_dma;
1884 }
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001885 dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001886 if (dbg_hc(chan))
1887 dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
1888 (unsigned long)dma_addr, chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001889 }
1890
1891 /* Start the split */
1892 if (chan->do_split) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001893 u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001894
1895 hcsplt |= HCSPLT_SPLTENA;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001896 dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001897 }
1898
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001899 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001900 hcchar &= ~HCCHAR_MULTICNT_MASK;
Douglas Anderson69b76cd2015-11-11 10:33:52 -08001901 hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001902 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
1903
1904 if (hcchar & HCCHAR_CHDIS)
1905 dev_warn(hsotg->dev,
1906 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1907 __func__, chan->hc_num, hcchar);
1908
1909 /* Set host channel enable after all other setup is complete */
1910 hcchar |= HCCHAR_CHENA;
1911 hcchar &= ~HCCHAR_CHDIS;
1912
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001913 if (dbg_hc(chan))
1914 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02001915 (hcchar & HCCHAR_MULTICNT_MASK) >>
1916 HCCHAR_MULTICNT_SHIFT);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001917
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001918 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001919 if (dbg_hc(chan))
1920 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
1921 chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001922
1923 chan->xfer_started = 1;
1924 chan->requests++;
1925
1926 if (hsotg->core_params->dma_enable <= 0 &&
1927 !chan->ep_is_in && chan->xfer_len > 0)
1928 /* Load OUT packet into the appropriate Tx FIFO */
1929 dwc2_hc_write_packet(hsotg, chan);
1930}
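/*
 * Worked example (hypothetical values, for illustration only): a 3072 byte
 * high-speed bulk IN transfer with a 512 byte max packet and no split,
 * assuming the core's maximum transfer size and packet count are not
 * exceeded, ends up with num_packets = (3072 + 511) / 512 = 6 and
 * xfer_len = 6 * 512 = 3072, so HCTSIZ is programmed with XferSize 3072,
 * PktCnt 6 and the PID from chan->data_pid_start, while ec_mc simply
 * carries chan->multi_count.
 */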
1931
1932/**
1933 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
1934 * host channel and starts the transfer in Descriptor DMA mode
1935 *
1936 * @hsotg: Programming view of DWC_otg controller
1937 * @chan: Information needed to initialize the host channel
1938 *
1939 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
1940 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
1941 * with micro-frame bitmap.
1942 *
1943 * Initializes HCDMA register with descriptor list address and CTD value then
1944 * starts the transfer via enabling the channel.
1945 */
1946void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
1947 struct dwc2_host_chan *chan)
1948{
1949 u32 hcchar;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001950 u32 hctsiz = 0;
1951
1952 if (chan->do_ping)
1953 hctsiz |= TSIZ_DOPNG;
1954
1955 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
1956 dwc2_set_pid_isoc(chan);
1957
1958 /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
1959 hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
1960 TSIZ_SC_MC_PID_MASK;
1961
1962 /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
1963 hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
1964
1965 /* Non-zero only for high-speed interrupt endpoints */
1966 hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
1967
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001968 if (dbg_hc(chan)) {
1969 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
1970 chan->hc_num);
1971 dev_vdbg(hsotg->dev, " Start PID: %d\n",
1972 chan->data_pid_start);
1973 dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
1974 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001975
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001976 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001977
Gregory Herrero95105a92015-11-20 11:49:29 +01001978 dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
1979 chan->desc_list_sz, DMA_TO_DEVICE);
1980
Mian Yousaf Kaukabe23b8a52015-11-20 11:49:30 +01001981 dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001982
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02001983 if (dbg_hc(chan))
Mian Yousaf Kaukabe23b8a52015-11-20 11:49:30 +01001984 dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
1985 &chan->desc_list_addr, chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001986
Antti Seppälä95c8bc32015-08-20 21:41:07 +03001987 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07001988 hcchar &= ~HCCHAR_MULTICNT_MASK;
1989 hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
1990 HCCHAR_MULTICNT_MASK;
1991
1992 if (hcchar & HCCHAR_CHDIS)
1993 dev_warn(hsotg->dev,
1994 "%s: chdis set, channel %d, hcchar 0x%08x\n",
1995 __func__, chan->hc_num, hcchar);
1996
1997 /* Set host channel enable after all other setup is complete */
1998 hcchar |= HCCHAR_CHENA;
1999 hcchar &= ~HCCHAR_CHDIS;
2000
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02002001 if (dbg_hc(chan))
2002 dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
Matthijs Kooijmand6ec53e2013-08-30 18:45:15 +02002003 (hcchar & HCCHAR_MULTICNT_MASK) >>
2004 HCCHAR_MULTICNT_SHIFT);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002005
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002006 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02002007 if (dbg_hc(chan))
2008 dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
2009 chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002010
2011 chan->xfer_started = 1;
2012 chan->requests++;
2013}
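/*
 * Note: in Descriptor DMA mode the transfer size and packet count live in
 * the descriptor list itself, so HCTSIZ above only carries the starting
 * PID, the descriptor count (NTD) and, for high-speed interrupt endpoints,
 * the SCHED_INFO micro-frame bitmap.
 */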
2014
2015/**
2016 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
2017 * a previous call to dwc2_hc_start_transfer()
2018 *
2019 * @hsotg: Programming view of DWC_otg controller
2020 * @chan: Information needed to initialize the host channel
2021 *
2022 * The caller must ensure there is sufficient space in the request queue and Tx
2023 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
2024 * the controller acts autonomously to complete transfers programmed to a host
2025 * channel.
2026 *
2027 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
2028 * if there is any data remaining to be queued. For an IN transfer, another
2029 * data packet is always requested. For the SETUP phase of a control transfer,
2030 * this function does nothing.
2031 *
2032 * Return: 1 if a new request is queued, 0 if no more requests are required
2033 * for this transfer
2034 */
2035int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
2036 struct dwc2_host_chan *chan)
2037{
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02002038 if (dbg_hc(chan))
2039 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
2040 chan->hc_num);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002041
2042 if (chan->do_split)
2043 /* SPLITs always queue just once per channel */
2044 return 0;
2045
2046 if (chan->data_pid_start == DWC2_HC_PID_SETUP)
2047 /* SETUPs are queued only once since they can't be NAK'd */
2048 return 0;
2049
2050 if (chan->ep_is_in) {
2051 /*
2052 * Always queue another request for other IN transfers. If
2053 * back-to-back INs are issued and NAKs are received for both,
2054 * the driver may still be processing the first NAK when the
2055 * second NAK is received. When the interrupt handler clears
2056 * the NAK interrupt for the first NAK, the second NAK will
2057 * not be seen. So we can't depend on the NAK interrupt
2058 * handler to requeue a NAK'd request. Instead, IN requests
2059 * are issued each time this function is called. When the
2060 * transfer completes, the extra requests for the channel will
2061 * be flushed.
2062 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002063 u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002064
2065 dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
2066 hcchar |= HCCHAR_CHENA;
2067 hcchar &= ~HCCHAR_CHDIS;
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02002068 if (dbg_hc(chan))
2069 dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
2070 hcchar);
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002071 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002072 chan->requests++;
2073 return 1;
2074 }
2075
2076 /* OUT transfers */
2077
2078 if (chan->xfer_count < chan->xfer_len) {
2079 if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
2080 chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002081 u32 hcchar = dwc2_readl(hsotg->regs +
2082 HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002083
2084 dwc2_hc_set_even_odd_frame(hsotg, chan,
2085 &hcchar);
2086 }
2087
2088 /* Load OUT packet into the appropriate Tx FIFO */
2089 dwc2_hc_write_packet(hsotg, chan);
2090 chan->requests++;
2091 return 1;
2092 }
2093
2094 return 0;
2095}
2096
2097/**
2098 * dwc2_hc_do_ping() - Starts a PING transfer
2099 *
2100 * @hsotg: Programming view of DWC_otg controller
2101 * @chan: Information needed to initialize the host channel
2102 *
2103 * This function should only be called in Slave mode. The Do Ping bit is set in
2104 * the HCTSIZ register, then the channel is enabled.
2105 */
2106void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
2107{
2108 u32 hcchar;
2109 u32 hctsiz;
2110
Matthijs Kooijmanb49977a2013-04-10 09:55:50 +02002111 if (dbg_hc(chan))
2112 dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
2113 chan->hc_num);
2114
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002115
2116 hctsiz = TSIZ_DOPNG;
2117 hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002118 dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002119
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002120 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002121 hcchar |= HCCHAR_CHENA;
2122 hcchar &= ~HCCHAR_CHDIS;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002123 dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002124}
2125
2126/**
2127 * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
2128 * the HFIR register according to PHY type and speed
2129 *
2130 * @hsotg: Programming view of DWC_otg controller
2131 *
2132 * NOTE: The caller can modify the value of the HFIR register only after the
2133 * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
2134 * has been set
2135 */
2136u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
2137{
2138 u32 usbcfg;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002139 u32 hprt0;
2140 int clock = 60; /* default value */
2141
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002142 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
2143 hprt0 = dwc2_readl(hsotg->regs + HPRT0);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002144
2145 if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
2146 !(usbcfg & GUSBCFG_PHYIF16))
2147 clock = 60;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002148 if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002149 GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
2150 clock = 48;
2151 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2152 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2153 clock = 30;
2154 if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2155 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
2156 clock = 60;
2157 if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
2158 !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
2159 clock = 48;
2160 if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002161 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002162 clock = 48;
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02002163 if ((usbcfg & GUSBCFG_PHYSEL) &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002164 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002165 clock = 48;
2166
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02002167 if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002168 /* High speed case */
2169 return 125 * clock;
2170 else
2171 /* FS/LS case */
2172 return 1000 * clock;
2173}
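/*
 * Worked example: with the default 60 MHz PHY clock a high-speed port
 * yields 125 * 60 = 7500 PHY clocks per 125 us microframe, while a 48 MHz
 * full-speed PHY yields 1000 * 48 = 48000 clocks per 1 ms frame.
 */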
2174
2175/**
2176 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
2177 * buffer
2178 *
2179 * @hsotg: Programming view of DWC_otg controller
2180 * @dest: Destination buffer for the packet
2181 * @bytes: Number of bytes to copy to the destination
2182 */
2183void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
2184{
2185 u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
2186 u32 *data_buf = (u32 *)dest;
2187 int word_count = (bytes + 3) / 4;
2188 int i;
2189
2190 /*
2191 * Todo: Account for the case where dest is not dword aligned. This
2192 * requires reading data from the FIFO into a u32 temp buffer, then
2193 * moving it into the data buffer.
2194 */
2195
2196 dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
2197
2198 for (i = 0; i < word_count; i++, data_buf++)
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002199 *data_buf = dwc2_readl(fifo);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002200}
2201
2202/**
2203 * dwc2_dump_host_registers() - Prints the host registers
2204 *
2205 * @hsotg: Programming view of DWC_otg controller
2206 *
2207 * NOTE: This function will be removed once the peripheral controller code
2208 * is integrated and the driver is stable
2209 */
2210void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
2211{
2212#ifdef DEBUG
2213 u32 __iomem *addr;
2214 int i;
2215
2216 dev_dbg(hsotg->dev, "Host Global Registers\n");
2217 addr = hsotg->regs + HCFG;
2218 dev_dbg(hsotg->dev, "HCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002219 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002220 addr = hsotg->regs + HFIR;
2221 dev_dbg(hsotg->dev, "HFIR @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002222 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002223 addr = hsotg->regs + HFNUM;
2224 dev_dbg(hsotg->dev, "HFNUM @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002225 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002226 addr = hsotg->regs + HPTXSTS;
2227 dev_dbg(hsotg->dev, "HPTXSTS @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002228 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002229 addr = hsotg->regs + HAINT;
2230 dev_dbg(hsotg->dev, "HAINT @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002231 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002232 addr = hsotg->regs + HAINTMSK;
2233 dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002234 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002235 if (hsotg->core_params->dma_desc_enable > 0) {
2236 addr = hsotg->regs + HFLBADDR;
2237 dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002238 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002239 }
2240
2241 addr = hsotg->regs + HPRT0;
2242 dev_dbg(hsotg->dev, "HPRT0 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002243 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002244
2245 for (i = 0; i < hsotg->core_params->host_channels; i++) {
2246 dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
2247 addr = hsotg->regs + HCCHAR(i);
2248 dev_dbg(hsotg->dev, "HCCHAR @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002249 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002250 addr = hsotg->regs + HCSPLT(i);
2251 dev_dbg(hsotg->dev, "HCSPLT @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002252 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002253 addr = hsotg->regs + HCINT(i);
2254 dev_dbg(hsotg->dev, "HCINT @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002255 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002256 addr = hsotg->regs + HCINTMSK(i);
2257 dev_dbg(hsotg->dev, "HCINTMSK @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002258 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002259 addr = hsotg->regs + HCTSIZ(i);
2260 dev_dbg(hsotg->dev, "HCTSIZ @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002261 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002262 addr = hsotg->regs + HCDMA(i);
2263 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002264 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002265 if (hsotg->core_params->dma_desc_enable > 0) {
2266 addr = hsotg->regs + HCDMAB(i);
2267 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002268 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002269 }
2270 }
2271#endif
2272}
2273
2274/**
2275 * dwc2_dump_global_registers() - Prints the core global registers
2276 *
2277 * @hsotg: Programming view of DWC_otg controller
2278 *
2279 * NOTE: This function will be removed once the peripheral controller code
2280 * is integrated and the driver is stable
2281 */
2282void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
2283{
2284#ifdef DEBUG
2285 u32 __iomem *addr;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002286
2287 dev_dbg(hsotg->dev, "Core Global Registers\n");
2288 addr = hsotg->regs + GOTGCTL;
2289 dev_dbg(hsotg->dev, "GOTGCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002290 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002291 addr = hsotg->regs + GOTGINT;
2292 dev_dbg(hsotg->dev, "GOTGINT @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002293 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002294 addr = hsotg->regs + GAHBCFG;
2295 dev_dbg(hsotg->dev, "GAHBCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002296 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002297 addr = hsotg->regs + GUSBCFG;
2298 dev_dbg(hsotg->dev, "GUSBCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002299 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002300 addr = hsotg->regs + GRSTCTL;
2301 dev_dbg(hsotg->dev, "GRSTCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002302 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002303 addr = hsotg->regs + GINTSTS;
2304 dev_dbg(hsotg->dev, "GINTSTS @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002305 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002306 addr = hsotg->regs + GINTMSK;
2307 dev_dbg(hsotg->dev, "GINTMSK @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002308 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002309 addr = hsotg->regs + GRXSTSR;
2310 dev_dbg(hsotg->dev, "GRXSTSR @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002311 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002312 addr = hsotg->regs + GRXFSIZ;
2313 dev_dbg(hsotg->dev, "GRXFSIZ @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002314 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002315 addr = hsotg->regs + GNPTXFSIZ;
2316 dev_dbg(hsotg->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002317 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002318 addr = hsotg->regs + GNPTXSTS;
2319 dev_dbg(hsotg->dev, "GNPTXSTS @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002320 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002321 addr = hsotg->regs + GI2CCTL;
2322 dev_dbg(hsotg->dev, "GI2CCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002323 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002324 addr = hsotg->regs + GPVNDCTL;
2325 dev_dbg(hsotg->dev, "GPVNDCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002326 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002327 addr = hsotg->regs + GGPIO;
2328 dev_dbg(hsotg->dev, "GGPIO @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002329 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002330 addr = hsotg->regs + GUID;
2331 dev_dbg(hsotg->dev, "GUID @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002332 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002333 addr = hsotg->regs + GSNPSID;
2334 dev_dbg(hsotg->dev, "GSNPSID @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002335 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002336 addr = hsotg->regs + GHWCFG1;
2337 dev_dbg(hsotg->dev, "GHWCFG1 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002338 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002339 addr = hsotg->regs + GHWCFG2;
2340 dev_dbg(hsotg->dev, "GHWCFG2 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002341 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002342 addr = hsotg->regs + GHWCFG3;
2343 dev_dbg(hsotg->dev, "GHWCFG3 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002344 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002345 addr = hsotg->regs + GHWCFG4;
2346 dev_dbg(hsotg->dev, "GHWCFG4 @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002347 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002348 addr = hsotg->regs + GLPMCFG;
2349 dev_dbg(hsotg->dev, "GLPMCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002350 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002351 addr = hsotg->regs + GPWRDN;
2352 dev_dbg(hsotg->dev, "GPWRDN @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002353 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002354 addr = hsotg->regs + GDFIFOCFG;
2355 dev_dbg(hsotg->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002356 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002357 addr = hsotg->regs + HPTXFSIZ;
2358 dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002359 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002360
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002361 addr = hsotg->regs + PCGCTL;
2362 dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002363 (unsigned long)addr, dwc2_readl(addr));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002364#endif
2365}
2366
2367/**
2368 * dwc2_flush_tx_fifo() - Flushes a Tx FIFO
2369 *
2370 * @hsotg: Programming view of DWC_otg controller
2371 * @num: Tx FIFO to flush
2372 */
2373void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num)
2374{
2375 u32 greset;
2376 int count = 0;
2377
2378 dev_vdbg(hsotg->dev, "Flush Tx FIFO %d\n", num);
2379
2380 greset = GRSTCTL_TXFFLSH;
2381 greset |= num << GRSTCTL_TXFNUM_SHIFT & GRSTCTL_TXFNUM_MASK;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002382 dwc2_writel(greset, hsotg->regs + GRSTCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002383
2384 do {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002385 greset = dwc2_readl(hsotg->regs + GRSTCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002386 if (++count > 10000) {
2387 dev_warn(hsotg->dev,
2388 "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
2389 __func__, greset,
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002390 dwc2_readl(hsotg->regs + GNPTXSTS));
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002391 break;
2392 }
2393 udelay(1);
2394 } while (greset & GRSTCTL_TXFFLSH);
2395
2396 /* Wait for at least 3 PHY Clocks */
2397 udelay(1);
2398}
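/*
 * Note: the loop above polls GRSTCTL roughly every microsecond and gives
 * up with a warning after 10000 iterations, i.e. after about 10 ms.
 */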
2399
2400/**
2401 * dwc2_flush_rx_fifo() - Flushes the Rx FIFO
2402 *
2403 * @hsotg: Programming view of DWC_otg controller
2404 */
2405void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
2406{
2407 u32 greset;
2408 int count = 0;
2409
2410 dev_vdbg(hsotg->dev, "%s()\n", __func__);
2411
2412 greset = GRSTCTL_RXFFLSH;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002413 dwc2_writel(greset, hsotg->regs + GRSTCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002414
2415 do {
Antti Seppälä95c8bc32015-08-20 21:41:07 +03002416 greset = dwc2_readl(hsotg->regs + GRSTCTL);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002417 if (++count > 10000) {
2418 dev_warn(hsotg->dev, "%s() HANG! GRSTCTL=%0x\n",
2419 __func__, greset);
2420 break;
2421 }
2422 udelay(1);
2423 } while (greset & GRSTCTL_RXFFLSH);
2424
2425 /* Wait for at least 3 PHY Clocks */
2426 udelay(1);
2427}
2428
Paul Zimmerman498f0662013-11-22 16:43:47 -08002429#define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c))
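/*
 * True when a lies outside the inclusive range [b, c]; for example,
 * DWC2_OUT_OF_BOUNDS(val, 0, 1) rejects every value except 0 and 1.
 */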
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002430
2431/* Parameter access functions */
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002432void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002433{
2434 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002435
2436 switch (val) {
2437 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002438 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002439 valid = 0;
2440 break;
2441 case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002442 switch (hsotg->hw_params.op_mode) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002443 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2444 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2445 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2446 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2447 break;
2448 default:
2449 valid = 0;
2450 break;
2451 }
2452 break;
2453 case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
2454 /* always valid */
2455 break;
2456 default:
2457 valid = 0;
2458 break;
2459 }
2460
2461 if (!valid) {
2462 if (val >= 0)
2463 dev_err(hsotg->dev,
2464 "%d invalid for otg_cap parameter. Check HW configuration.\n",
2465 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002466 switch (hsotg->hw_params.op_mode) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002467 case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
2468 val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
2469 break;
2470 case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
2471 case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
2472 case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
2473 val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
2474 break;
2475 default:
2476 val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
2477 break;
2478 }
2479 dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002480 }
2481
2482 hsotg->core_params->otg_cap = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002483}
2484
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002485void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002486{
2487 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002488
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002489 if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002490 valid = 0;
2491 if (val < 0)
2492 valid = 0;
2493
2494 if (!valid) {
2495 if (val >= 0)
2496 dev_err(hsotg->dev,
2497 "%d invalid for dma_enable parameter. Check HW configuration.\n",
2498 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002499 val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002500 dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002501 }
2502
2503 hsotg->core_params->dma_enable = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002504}
2505
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002506void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002507{
2508 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002509
2510 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002511 !hsotg->hw_params.dma_desc_enable))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002512 valid = 0;
2513 if (val < 0)
2514 valid = 0;
2515
2516 if (!valid) {
2517 if (val >= 0)
2518 dev_err(hsotg->dev,
2519 "%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
2520 val);
2521 val = (hsotg->core_params->dma_enable > 0 &&
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002522 hsotg->hw_params.dma_desc_enable);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002523 dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002524 }
2525
2526 hsotg->core_params->dma_desc_enable = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002527}
2528
Mian Yousaf Kaukabfbb9e222015-11-20 11:49:28 +01002529void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
2530{
2531 int valid = 1;
2532
2533 if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
2534 !hsotg->hw_params.dma_desc_enable))
2535 valid = 0;
2536 if (val < 0)
2537 valid = 0;
2538
2539 if (!valid) {
2540 if (val >= 0)
2541 dev_err(hsotg->dev,
2542 "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
2543 val);
2544 val = (hsotg->core_params->dma_enable > 0 &&
2545 hsotg->hw_params.dma_desc_enable);
2546 }
2547
2548 hsotg->core_params->dma_desc_fs_enable = val;
2549 dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
2550}
2551
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002552void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
2553 int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002554{
Paul Zimmerman498f0662013-11-22 16:43:47 -08002555 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002556 if (val >= 0) {
2557 dev_err(hsotg->dev,
2558				"Wrong value for host_support_fs_ls_low_power\n");
2559 dev_err(hsotg->dev,
2560				"host_support_fs_ls_low_power must be 0 or 1\n");
2561 }
2562 val = 0;
2563 dev_dbg(hsotg->dev,
2564 "Setting host_support_fs_low_power to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002565 }
2566
2567 hsotg->core_params->host_support_fs_ls_low_power = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002568}
2569
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002570void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002571{
2572 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002573
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002574 if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002575 valid = 0;
2576 if (val < 0)
2577 valid = 0;
2578
2579 if (!valid) {
2580 if (val >= 0)
2581 dev_err(hsotg->dev,
2582 "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
2583 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002584 val = hsotg->hw_params.enable_dynamic_fifo;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002585 dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002586 }
2587
2588 hsotg->core_params->enable_dynamic_fifo = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002589}
2590
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002591void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002592{
2593 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002594
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002595 if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002596 valid = 0;
2597
2598 if (!valid) {
2599 if (val >= 0)
2600 dev_err(hsotg->dev,
2601 "%d invalid for host_rx_fifo_size. Check HW configuration.\n",
2602 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002603 val = hsotg->hw_params.host_rx_fifo_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002604 dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002605 }
2606
2607 hsotg->core_params->host_rx_fifo_size = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002608}
2609
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002610void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002611{
2612 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002613
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002614 if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002615 valid = 0;
2616
2617 if (!valid) {
2618 if (val >= 0)
2619 dev_err(hsotg->dev,
2620 "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
2621 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002622 val = hsotg->hw_params.host_nperio_tx_fifo_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002623 dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
2624 val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002625 }
2626
2627 hsotg->core_params->host_nperio_tx_fifo_size = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002628}
2629
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002630void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002631{
2632 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002633
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002634 if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002635 valid = 0;
2636
2637 if (!valid) {
2638 if (val >= 0)
2639 dev_err(hsotg->dev,
2640 "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
2641 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002642 val = hsotg->hw_params.host_perio_tx_fifo_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002643 dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
2644 val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002645 }
2646
2647 hsotg->core_params->host_perio_tx_fifo_size = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002648}
2649
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002650void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002651{
2652 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002653
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002654 if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002655 valid = 0;
2656
2657 if (!valid) {
2658 if (val >= 0)
2659 dev_err(hsotg->dev,
2660 "%d invalid for max_transfer_size. Check HW configuration.\n",
2661 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002662 val = hsotg->hw_params.max_transfer_size;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002663 dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002664 }
2665
2666 hsotg->core_params->max_transfer_size = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002667}
2668
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002669void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002670{
2671 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002672
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002673 if (val < 15 || val > hsotg->hw_params.max_packet_count)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002674 valid = 0;
2675
2676 if (!valid) {
2677 if (val >= 0)
2678 dev_err(hsotg->dev,
2679 "%d invalid for max_packet_count. Check HW configuration.\n",
2680 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002681 val = hsotg->hw_params.max_packet_count;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002682 dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002683 }
2684
2685 hsotg->core_params->max_packet_count = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002686}
2687
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002688void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002689{
2690 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002691
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002692 if (val < 1 || val > hsotg->hw_params.host_channels)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002693 valid = 0;
2694
2695 if (!valid) {
2696 if (val >= 0)
2697 dev_err(hsotg->dev,
2698 "%d invalid for host_channels. Check HW configuration.\n",
2699 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002700 val = hsotg->hw_params.host_channels;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002701 dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002702 }
2703
2704 hsotg->core_params->host_channels = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002705}
2706
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002707void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002708{
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002709 int valid = 0;
Luis Ortega Perez de Villar0464a3d2013-09-25 13:10:50 +02002710 u32 hs_phy_type, fs_phy_type;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002711
Paul Zimmerman498f0662013-11-22 16:43:47 -08002712 if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
2713 DWC2_PHY_TYPE_PARAM_ULPI)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002714 if (val >= 0) {
2715 dev_err(hsotg->dev, "Wrong value for phy_type\n");
2716 dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
2717 }
2718
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002719 valid = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002720 }
2721
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002722 hs_phy_type = hsotg->hw_params.hs_phy_type;
2723 fs_phy_type = hsotg->hw_params.fs_phy_type;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002724 if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
2725 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2726 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2727 valid = 1;
2728 else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
2729 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
2730 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
2731 valid = 1;
2732 else if (val == DWC2_PHY_TYPE_PARAM_FS &&
2733 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
2734 valid = 1;
2735
2736 if (!valid) {
2737 if (val >= 0)
2738 dev_err(hsotg->dev,
2739 "%d invalid for phy_type. Check HW configuration.\n",
2740 val);
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002741 val = DWC2_PHY_TYPE_PARAM_FS;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002742 if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
2743 if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
2744 hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
2745 val = DWC2_PHY_TYPE_PARAM_UTMI;
2746 else
2747 val = DWC2_PHY_TYPE_PARAM_ULPI;
2748 }
2749 dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002750 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002751
2752 hsotg->core_params->phy_type = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002753}
2754
2755static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
2756{
2757 return hsotg->core_params->phy_type;
2758}
2759
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002760void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002761{
2762 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002763
Paul Zimmerman498f0662013-11-22 16:43:47 -08002764 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002765 if (val >= 0) {
2766 dev_err(hsotg->dev, "Wrong value for speed parameter\n");
2767 dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
2768 }
2769 valid = 0;
2770 }
2771
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002772 if (val == DWC2_SPEED_PARAM_HIGH &&
2773 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002774 valid = 0;
2775
2776 if (!valid) {
2777 if (val >= 0)
2778 dev_err(hsotg->dev,
2779 "%d invalid for speed parameter. Check HW configuration.\n",
2780 val);
2781 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
Matthijs Kooijman929aea02013-04-29 19:36:48 +00002782 DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002783 dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002784 }
2785
2786 hsotg->core_params->speed = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002787}
2788
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002789void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002790{
2791 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002792
Paul Zimmerman498f0662013-11-22 16:43:47 -08002793 if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
2794 DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002795 if (val >= 0) {
2796 dev_err(hsotg->dev,
2797 "Wrong value for host_ls_low_power_phy_clk parameter\n");
2798 dev_err(hsotg->dev,
2799 "host_ls_low_power_phy_clk must be 0 or 1\n");
2800 }
2801 valid = 0;
2802 }
2803
2804 if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
2805 dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
2806 valid = 0;
2807
2808 if (!valid) {
2809 if (val >= 0)
2810 dev_err(hsotg->dev,
2811 "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
2812 val);
2813 val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
2814 ? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
2815 : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
2816 dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
2817 val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002818 }
2819
2820 hsotg->core_params->host_ls_low_power_phy_clk = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002821}
2822
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002823void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002824{
Paul Zimmerman498f0662013-11-22 16:43:47 -08002825 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002826 if (val >= 0) {
2827 dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
2828 dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
2829 }
2830 val = 0;
2831 dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002832 }
2833
2834 hsotg->core_params->phy_ulpi_ddr = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002835}
2836
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002837void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002838{
Paul Zimmerman498f0662013-11-22 16:43:47 -08002839 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002840 if (val >= 0) {
2841 dev_err(hsotg->dev,
2842 "Wrong value for phy_ulpi_ext_vbus\n");
2843 dev_err(hsotg->dev,
2844 "phy_ulpi_ext_vbus must be 0 or 1\n");
2845 }
2846 val = 0;
2847 dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002848 }
2849
2850 hsotg->core_params->phy_ulpi_ext_vbus = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002851}
2852
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002853void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002854{
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002855 int valid = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002856
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002857 switch (hsotg->hw_params.utmi_phy_data_width) {
2858 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
2859 valid = (val == 8);
2860 break;
2861 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
2862 valid = (val == 16);
2863 break;
2864 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
2865 valid = (val == 8 || val == 16);
2866 break;
2867 }
2868
2869 if (!valid) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002870 if (val >= 0) {
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002871 dev_err(hsotg->dev,
2872 "%d invalid for phy_utmi_width. Check HW configuration.\n",
2873 val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002874 }
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02002875 val = (hsotg->hw_params.utmi_phy_data_width ==
2876 GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002877 dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002878 }
2879
2880 hsotg->core_params->phy_utmi_width = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002881}
2882
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002883void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002884{
Paul Zimmerman498f0662013-11-22 16:43:47 -08002885 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002886 if (val >= 0) {
2887 dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
2888 dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
2889 }
2890 val = 0;
2891 dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002892 }
2893
2894 hsotg->core_params->ulpi_fs_ls = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002895}
2896
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002897void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002898{
Paul Zimmerman498f0662013-11-22 16:43:47 -08002899 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002900 if (val >= 0) {
2901 dev_err(hsotg->dev, "Wrong value for ts_dline\n");
2902 dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
2903 }
2904 val = 0;
2905 dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002906 }
2907
2908 hsotg->core_params->ts_dline = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002909}
2910
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002911void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002912{
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002913 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002914
Paul Zimmerman498f0662013-11-22 16:43:47 -08002915 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002916 if (val >= 0) {
2917 dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
2918 dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
2919 }
2920
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002921 valid = 0;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002922 }
2923
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002924 if (val == 1 && !(hsotg->hw_params.i2c_enable))
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002925 valid = 0;
2926
2927 if (!valid) {
2928 if (val >= 0)
2929 dev_err(hsotg->dev,
2930 "%d invalid for i2c_enable. Check HW configuration.\n",
2931 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002932 val = hsotg->hw_params.i2c_enable;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002933 dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002934 }
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002935
2936 hsotg->core_params->i2c_enable = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002937}
2938
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002939void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002940{
2941 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002942
Paul Zimmerman498f0662013-11-22 16:43:47 -08002943 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002944 if (val >= 0) {
2945 dev_err(hsotg->dev,
2946 "Wrong value for en_multiple_tx_fifo,\n");
2947 dev_err(hsotg->dev,
2948 "en_multiple_tx_fifo must be 0 or 1\n");
2949 }
2950 valid = 0;
2951 }
2952
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002953 if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002954 valid = 0;
2955
2956 if (!valid) {
2957 if (val >= 0)
2958 dev_err(hsotg->dev,
2959 "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
2960 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002961 val = hsotg->hw_params.en_multiple_tx_fifo;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002962 dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002963 }
2964
2965 hsotg->core_params->en_multiple_tx_fifo = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002966}
2967
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002968void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002969{
2970 int valid = 1;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002971
Paul Zimmerman498f0662013-11-22 16:43:47 -08002972 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002973 if (val >= 0) {
2974 dev_err(hsotg->dev,
2975 "'%d' invalid for parameter reload_ctl\n", val);
2976 dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
2977 }
2978 valid = 0;
2979 }
2980
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002981 if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002982 valid = 0;
2983
2984 if (!valid) {
2985 if (val >= 0)
2986 dev_err(hsotg->dev,
2987 "%d invalid for parameter reload_ctl. Check HW configuration.\n",
2988 val);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02002989 val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002990 dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002991 }
2992
2993 hsotg->core_params->reload_ctl = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002994}
2995
Paul Zimmerman7218dae2013-11-22 16:43:48 -08002996void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07002997{
Paul Zimmerman4d3190e2013-07-16 12:22:12 -07002998 if (val != -1)
2999 hsotg->core_params->ahbcfg = val;
3000 else
Matthijs Kooijmanf9234632013-08-30 18:45:13 +02003001 hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
Luis Ortega Perez de Villar0464a3d2013-09-25 13:10:50 +02003002 GAHBCFG_HBSTLEN_SHIFT;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003003}
3004
Paul Zimmerman7218dae2013-11-22 16:43:48 -08003005void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003006{
Paul Zimmerman498f0662013-11-22 16:43:47 -08003007 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003008 if (val >= 0) {
3009 dev_err(hsotg->dev,
3010 "'%d' invalid for parameter otg_ver\n", val);
3011 dev_err(hsotg->dev,
3012 "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
3013 }
3014 val = 0;
3015 dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003016 }
3017
3018 hsotg->core_params->otg_ver = val;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003019}
3020
Wei Yongjun49cf10c2013-11-28 10:27:59 +08003021static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
Paul Zimmermane8576e62013-11-25 13:42:47 -08003022{
3023 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3024 if (val >= 0) {
3025 dev_err(hsotg->dev,
3026 "'%d' invalid for parameter uframe_sched\n",
3027 val);
3028 dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
3029 }
3030 val = 1;
3031 dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
3032 }
3033
3034 hsotg->core_params->uframe_sched = val;
3035}
3036
Gregory Herreroa6d249d2015-04-29 22:09:04 +02003037static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
3038 int val)
3039{
3040 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3041 if (val >= 0) {
3042 dev_err(hsotg->dev,
3043 "'%d' invalid for parameter external_id_pin_ctl\n",
3044 val);
3045 dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
3046 }
3047 val = 0;
3048 dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
3049 }
3050
3051 hsotg->core_params->external_id_pin_ctl = val;
3052}
3053
Gregory Herrero285046a2015-04-29 22:09:19 +02003054static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
3055 int val)
3056{
3057 if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
3058 if (val >= 0) {
3059 dev_err(hsotg->dev,
3060 "'%d' invalid for parameter hibernation\n",
3061 val);
3062 dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
3063 }
3064 val = 0;
3065 dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
3066 }
3067
3068 hsotg->core_params->hibernation = val;
3069}
3070
Paul Zimmermane8576e62013-11-25 13:42:47 -08003071/*
 3072 * This function is called during module initialization to pass module
 3073 * parameters for the DWC_otg core.
3074 */
3075void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
3076 const struct dwc2_core_params *params)
3077{
3078 dev_dbg(hsotg->dev, "%s()\n", __func__);
3079
3080 dwc2_set_param_otg_cap(hsotg, params->otg_cap);
3081 dwc2_set_param_dma_enable(hsotg, params->dma_enable);
3082 dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
Mian Yousaf Kaukabfbb9e222015-11-20 11:49:28 +01003083 dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
Paul Zimmermane8576e62013-11-25 13:42:47 -08003084 dwc2_set_param_host_support_fs_ls_low_power(hsotg,
3085 params->host_support_fs_ls_low_power);
3086 dwc2_set_param_enable_dynamic_fifo(hsotg,
3087 params->enable_dynamic_fifo);
3088 dwc2_set_param_host_rx_fifo_size(hsotg,
3089 params->host_rx_fifo_size);
3090 dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
3091 params->host_nperio_tx_fifo_size);
3092 dwc2_set_param_host_perio_tx_fifo_size(hsotg,
3093 params->host_perio_tx_fifo_size);
3094 dwc2_set_param_max_transfer_size(hsotg,
3095 params->max_transfer_size);
3096 dwc2_set_param_max_packet_count(hsotg,
3097 params->max_packet_count);
3098 dwc2_set_param_host_channels(hsotg, params->host_channels);
3099 dwc2_set_param_phy_type(hsotg, params->phy_type);
3100 dwc2_set_param_speed(hsotg, params->speed);
3101 dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
3102 params->host_ls_low_power_phy_clk);
3103 dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
3104 dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
3105 params->phy_ulpi_ext_vbus);
3106 dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
3107 dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
3108 dwc2_set_param_ts_dline(hsotg, params->ts_dline);
3109 dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
3110 dwc2_set_param_en_multiple_tx_fifo(hsotg,
3111 params->en_multiple_tx_fifo);
3112 dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
3113 dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
3114 dwc2_set_param_otg_ver(hsotg, params->otg_ver);
3115 dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
Gregory Herreroa6d249d2015-04-29 22:09:04 +02003116 dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
Gregory Herrero285046a2015-04-29 22:09:19 +02003117 dwc2_set_param_hibernation(hsotg, params->hibernation);
Paul Zimmermane8576e62013-11-25 13:42:47 -08003118}
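
/*
 * Minimal usage sketch (illustrative only, not taken from a specific glue
 * driver): after dwc2_get_hwparams() has populated hsotg->hw_params, a
 * platform would typically request autodetection for everything, override
 * what its board needs, and let the helpers above validate the result:
 *
 *	struct dwc2_core_params params;
 *
 *	dwc2_set_all_params(&params, -1);
 *	params.speed = DWC2_SPEED_PARAM_HIGH;
 *	dwc2_set_parameters(hsotg, &params);
 */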
3119
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003120/**
 3121 * dwc2_get_hwparams() - During device initialization, read various hardware
 3122 * configuration registers and interpret the contents.
3123 */
3124int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
3125{
3126 struct dwc2_hw_params *hw = &hsotg->hw_params;
3127 unsigned width;
3128 u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
3129 u32 hptxfsiz, grxfsiz, gnptxfsiz;
Douglas Andersonf6194732015-12-17 11:14:54 -08003130 u32 gusbcfg = 0;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003131
3132 /*
3133 * Attempt to ensure this device is really a DWC_otg Controller.
3134 * Read and verify the GSNPSID register contents. The value should be
 3135 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
3136 * as in "OTG version 2.xx" or "OTG version 3.xx".
3137 */
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003138 hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003139 if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
3140 (hw->snpsid & 0xfffff000) != 0x4f543000) {
3141 dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
3142 hw->snpsid);
3143 return -ENODEV;
3144 }
3145
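	/*
	 * The top 16 bits of GSNPSID are the ASCII codes for "OT" (0x4f54)
	 * and the low 16 bits encode the release, so e.g. 0x4f54271a would
	 * identify an OT2 core, release 2.71a.
	 */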
3146 dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
3147 hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
3148 hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
3149
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003150 hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
3151 hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
3152 hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
3153 hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
3154 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003155
3156 dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
3157 dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
3158 dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
3159 dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003160 dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
3161
Doug Anderson2867c052014-08-07 12:48:11 -07003162 /* Force host mode to get HPTXFSIZ / GNPTXFSIZ exact power on value */
Douglas Andersonf6194732015-12-17 11:14:54 -08003163 if (hsotg->dr_mode != USB_DR_MODE_HOST) {
3164 gusbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
Douglas Anderson99182462015-12-17 11:14:12 -08003165 dwc2_writel(gusbcfg | GUSBCFG_FORCEHOSTMODE,
3166 hsotg->regs + GUSBCFG);
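		/*
		 * Give the forced host mode time to take effect before the
		 * host FIFO size registers are sampled below.
		 */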
Yunzhi Li20bde642015-12-17 11:15:08 -08003167 usleep_range(25000, 50000);
Douglas Anderson99182462015-12-17 11:14:12 -08003168 }
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003169
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003170 gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
3171 hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
Doug Anderson2867c052014-08-07 12:48:11 -07003172 dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003173 dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
Douglas Andersonf6194732015-12-17 11:14:54 -08003174 if (hsotg->dr_mode != USB_DR_MODE_HOST) {
Douglas Anderson99182462015-12-17 11:14:12 -08003175 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
Yunzhi Li20bde642015-12-17 11:15:08 -08003176 usleep_range(25000, 50000);
Douglas Anderson99182462015-12-17 11:14:12 -08003177 }
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003178
3179 /* hwcfg2 */
3180 hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
3181 GHWCFG2_OP_MODE_SHIFT;
3182 hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
3183 GHWCFG2_ARCHITECTURE_SHIFT;
3184 hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
3185 hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
3186 GHWCFG2_NUM_HOST_CHAN_SHIFT);
3187 hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
3188 GHWCFG2_HS_PHY_TYPE_SHIFT;
3189 hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
3190 GHWCFG2_FS_PHY_TYPE_SHIFT;
3191 hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
3192 GHWCFG2_NUM_DEV_EP_SHIFT;
3193 hw->nperio_tx_q_depth =
3194 (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
3195 GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
3196 hw->host_perio_tx_q_depth =
3197 (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
3198 GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
3199 hw->dev_token_q_depth =
3200 (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
3201 GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
3202
3203 /* hwcfg3 */
3204 width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
3205 GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
3206 hw->max_transfer_size = (1 << (width + 11)) - 1;
Paul Zimmermane8f8c142014-09-16 13:47:26 -07003207 /*
3208 * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
3209 * coherent buffers with this size, and if it's too large we can
3210 * exhaust the coherent DMA pool.
3211 */
3212 if (hw->max_transfer_size > 65535)
3213 hw->max_transfer_size = 65535;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003214 width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
3215 GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
3216 hw->max_packet_count = (1 << (width + 4)) - 1;
3217 hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
3218 hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
3219 GHWCFG3_DFIFO_DEPTH_SHIFT;
3220
3221 /* hwcfg4 */
3222 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
3223 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
3224 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
3225 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
3226 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02003227 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
3228 GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003229
3230 /* fifo sizes */
3231 hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
3232 GRXFSIZ_DEPTH_SHIFT;
3233 hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
3234 FIFOSIZE_DEPTH_SHIFT;
3235 hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
3236 FIFOSIZE_DEPTH_SHIFT;
3237
3238 dev_dbg(hsotg->dev, "Detected values from hardware:\n");
3239 dev_dbg(hsotg->dev, " op_mode=%d\n",
3240 hw->op_mode);
3241 dev_dbg(hsotg->dev, " arch=%d\n",
3242 hw->arch);
3243 dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
3244 hw->dma_desc_enable);
3245 dev_dbg(hsotg->dev, " power_optimized=%d\n",
3246 hw->power_optimized);
3247 dev_dbg(hsotg->dev, " i2c_enable=%d\n",
3248 hw->i2c_enable);
3249 dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
3250 hw->hs_phy_type);
3251 dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
3252 hw->fs_phy_type);
Masanari Iida971bd8f2015-05-20 23:54:02 +09003253 dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n",
Matthijs Kooijmande4a1932013-08-30 18:45:22 +02003254 hw->utmi_phy_data_width);
Matthijs Kooijman9badec22013-08-30 18:45:21 +02003255 dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
3256 hw->num_dev_ep);
3257 dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
3258 hw->num_dev_perio_in_ep);
3259 dev_dbg(hsotg->dev, " host_channels=%d\n",
3260 hw->host_channels);
3261 dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
3262 hw->max_transfer_size);
3263 dev_dbg(hsotg->dev, " max_packet_count=%d\n",
3264 hw->max_packet_count);
3265 dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
3266 hw->nperio_tx_q_depth);
3267 dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
3268 hw->host_perio_tx_q_depth);
3269 dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
3270 hw->dev_token_q_depth);
3271 dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
3272 hw->enable_dynamic_fifo);
3273 dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
3274 hw->en_multiple_tx_fifo);
3275 dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
3276 hw->total_fifo_size);
3277 dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n",
3278 hw->host_rx_fifo_size);
3279 dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
3280 hw->host_nperio_tx_fifo_size);
3281 dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
3282 hw->host_perio_tx_fifo_size);
3283 dev_dbg(hsotg->dev, "\n");
3284
3285 return 0;
3286}
Mian Yousaf Kaukabecb176c2015-04-29 22:09:05 +02003287
3288/*
3289 * Sets all parameters to the given value.
3290 *
3291 * Assumes that the dwc2_core_params struct contains only integers.
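 *
 * Glue code typically calls this with -1 so that every parameter starts
 * out as "not set" before any board specific overrides are applied.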
3292 */
3293void dwc2_set_all_params(struct dwc2_core_params *params, int value)
3294{
3295 int *p = (int *)params;
3296 size_t size = sizeof(*params) / sizeof(*p);
3297 int i;
3298
3299 for (i = 0; i < size; i++)
3300 p[i] = value;
3301}
Mian Yousaf Kaukabecb176c2015-04-29 22:09:05 +02003302
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003304u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
3305{
Paul Zimmermanb66a3f02013-11-22 16:43:50 -08003306 return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003307}
3308
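/*
 * Reading GSNPSID as all ones usually means the register access failed,
 * e.g. because the controller is powered down or its bus clock is gated,
 * so treat that as "controller not alive".
 */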
Paul Zimmerman057715f2013-11-22 16:43:51 -08003309bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003310{
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003311 if (dwc2_readl(hsotg->regs + GSNPSID) == 0xffffffff)
Paul Zimmerman057715f2013-11-22 16:43:51 -08003312 return false;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003313 else
Paul Zimmerman057715f2013-11-22 16:43:51 -08003314 return true;
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003315}
3316
3317/**
3318 * dwc2_enable_global_interrupts() - Enables the controller's Global
3319 * Interrupt in the AHB Config register
3320 *
3321 * @hsotg: Programming view of DWC_otg controller
3322 */
3323void dwc2_enable_global_interrupts(struct dwc2_hsotg *hsotg)
3324{
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003325 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003326
3327 ahbcfg |= GAHBCFG_GLBL_INTR_EN;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003328 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003329}
3330
3331/**
3332 * dwc2_disable_global_interrupts() - Disables the controller's Global
3333 * Interrupt in the AHB Config register
3334 *
3335 * @hsotg: Programming view of DWC_otg controller
3336 */
3337void dwc2_disable_global_interrupts(struct dwc2_hsotg *hsotg)
3338{
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003339 u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003340
3341 ahbcfg &= ~GAHBCFG_GLBL_INTR_EN;
Antti Seppälä95c8bc32015-08-20 21:41:07 +03003342 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003343}
3344
John Youn6bea9622015-12-17 11:16:17 -08003345/* Returns the controller's GHWCFG2.OTG_MODE. */
3346unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg)
3347{
3348 u32 ghwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
3349
3350 return (ghwcfg2 & GHWCFG2_OP_MODE_MASK) >>
3351 GHWCFG2_OP_MODE_SHIFT;
3352}
3353
3354/* Returns true if the controller is capable of DRD. */
3355bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg)
3356{
3357 unsigned op_mode = dwc2_op_mode(hsotg);
3358
3359 return (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) ||
3360 (op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE) ||
3361 (op_mode == GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE);
3362}
3363
3364/* Returns true if the controller is host-only. */
3365bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg)
3366{
3367 unsigned op_mode = dwc2_op_mode(hsotg);
3368
3369 return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) ||
3370 (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST);
3371}
3372
3373/* Returns true if the controller is device-only. */
3374bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg)
3375{
3376 unsigned op_mode = dwc2_op_mode(hsotg);
3377
3378 return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) ||
3379 (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE);
3380}
3381
Paul Zimmerman56f5b1c2013-03-11 17:47:58 -07003382MODULE_DESCRIPTION("DESIGNWARE HS OTG Core");
3383MODULE_AUTHOR("Synopsys, Inc.");
3384MODULE_LICENSE("Dual BSD/GPL");