blob: f2116f9ee95fef9e4f9e470dce4b0a2f4dc00056 [file] [log] [blame]
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h>
Manu Gautamb5067272012-07-02 09:53:41 +053019#include <linux/pm_runtime.h>
20#include <linux/interrupt.h>
Ido Shayevitzef72ddd2012-03-28 18:55:55 +020021#include <linux/ioport.h>
Manu Gautam1742db22012-06-19 13:33:24 +053022#include <linux/clk.h>
Ido Shayevitzef72ddd2012-03-28 18:55:55 +020023#include <linux/io.h>
24#include <linux/module.h>
25#include <linux/types.h>
Ido Shayevitzef72ddd2012-03-28 18:55:55 +020026#include <linux/delay.h>
27#include <linux/of.h>
Ido Shayevitz9fb83452012-04-01 17:45:58 +030028#include <linux/list.h>
Manu Gautamb5067272012-07-02 09:53:41 +053029#include <linux/debugfs.h>
30#include <linux/uaccess.h>
Ido Shayevitz9fb83452012-04-01 17:45:58 +030031#include <linux/usb/ch9.h>
32#include <linux/usb/gadget.h>
33#include <linux/usb/msm_hsusb.h>
Manu Gautam60e01352012-05-29 09:00:34 +053034#include <linux/regulator/consumer.h>
35
36#include <mach/rpm-regulator.h>
Ido Shayevitz9fb83452012-04-01 17:45:58 +030037
Manu Gautam8c642812012-06-07 10:35:10 +053038#include "dwc3_otg.h"
Ido Shayevitz9fb83452012-04-01 17:45:58 +030039#include "core.h"
40#include "gadget.h"
41
42/**
43 * USB DBM Hardware registers.
44 *
45 */
Shimrit Malichia00d7322012-08-05 13:56:28 +030046#define DBM_BASE 0x000F8000
47#define DBM_EP_CFG(n) (DBM_BASE + (0x00 + 4 * (n)))
48#define DBM_DATA_FIFO(n) (DBM_BASE + (0x10 + 4 * (n)))
49#define DBM_DATA_FIFO_SIZE(n) (DBM_BASE + (0x20 + 4 * (n)))
50#define DBM_DATA_FIFO_EN (DBM_BASE + (0x30))
51#define DBM_GEVNTADR (DBM_BASE + (0x34))
52#define DBM_GEVNTSIZ (DBM_BASE + (0x38))
53#define DBM_DBG_CNFG (DBM_BASE + (0x3C))
54#define DBM_HW_TRB0_EP(n) (DBM_BASE + (0x40 + 4 * (n)))
55#define DBM_HW_TRB1_EP(n) (DBM_BASE + (0x50 + 4 * (n)))
56#define DBM_HW_TRB2_EP(n) (DBM_BASE + (0x60 + 4 * (n)))
57#define DBM_HW_TRB3_EP(n) (DBM_BASE + (0x70 + 4 * (n)))
58#define DBM_PIPE_CFG (DBM_BASE + (0x80))
59#define DBM_SOFT_RESET (DBM_BASE + (0x84))
60#define DBM_GEN_CFG (DBM_BASE + (0x88))
Ido Shayevitz9fb83452012-04-01 17:45:58 +030061
62/**
63 * USB DBM Hardware registers bitmask.
64 *
65 */
66/* DBM_EP_CFG */
Shimrit Malichia00d7322012-08-05 13:56:28 +030067#define DBM_EN_EP 0x00000001
68#define USB3_EPNUM 0x0000003E
Ido Shayevitz9fb83452012-04-01 17:45:58 +030069#define DBM_BAM_PIPE_NUM 0x000000C0
70#define DBM_PRODUCER 0x00000100
71#define DBM_DISABLE_WB 0x00000200
72#define DBM_INT_RAM_ACC 0x00000400
73
74/* DBM_DATA_FIFO_SIZE */
75#define DBM_DATA_FIFO_SIZE_MASK 0x0000ffff
76
77/* DBM_GEVNTSIZ */
78#define DBM_GEVNTSIZ_MASK 0x0000ffff
79
80/* DBM_DBG_CNFG */
81#define DBM_ENABLE_IOC_MASK 0x0000000f
82
83/* DBM_SOFT_RESET */
84#define DBM_SFT_RST_EP0 0x00000001
85#define DBM_SFT_RST_EP1 0x00000002
86#define DBM_SFT_RST_EP2 0x00000004
87#define DBM_SFT_RST_EP3 0x00000008
Shimrit Malichia00d7322012-08-05 13:56:28 +030088#define DBM_SFT_RST_EPS_MASK 0x0000000F
89#define DBM_SFT_RST_MASK 0x80000000
90#define DBM_EN_MASK 0x00000002
Ido Shayevitzef72ddd2012-03-28 18:55:55 +020091
92#define DBM_MAX_EPS 4
93
Ido Shayevitzfa65a582012-06-06 14:39:54 +030094/* DBM TRB configurations */
95#define DBM_TRB_BIT 0x80000000
96#define DBM_TRB_DATA_SRC 0x40000000
97#define DBM_TRB_DMA 0x20000000
98#define DBM_TRB_EP_NUM(ep) (ep<<24)
Shimrit Malichia00d7322012-08-05 13:56:28 +030099
Manu Gautam8c642812012-06-07 10:35:10 +0530100/**
101 * USB QSCRATCH Hardware registers
102 *
103 */
104#define QSCRATCH_REG_OFFSET (0x000F8800)
Shimrit Malichia00d7322012-08-05 13:56:28 +0300105#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
Manu Gautam8c642812012-06-07 10:35:10 +0530106#define CHARGING_DET_CTRL_REG (QSCRATCH_REG_OFFSET + 0x18)
107#define CHARGING_DET_OUTPUT_REG (QSCRATCH_REG_OFFSET + 0x1C)
108#define ALT_INTERRUPT_EN_REG (QSCRATCH_REG_OFFSET + 0x20)
109#define HS_PHY_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x24)
110
/*
 * Bookkeeping node used while a request's ->complete callback is
 * temporarily overridden with dwc3_msm_req_complete_func().
 */
struct dwc3_msm_req_complete {
	struct list_head list_item;	/* entry in dwc3_msm::req_complete_list */
	struct usb_request *req;	/* request whose callback was saved */
	/* original completion callback, restored and invoked on completion */
	void (*orig_complete)(struct usb_ep *ep,
				struct usb_request *req);
};
117
/*
 * Per-device state of the MSM glue layer wrapping the DWC3 core.
 * A single instance is referenced through the file-scope 'context'.
 */
struct dwc3_msm {
	struct platform_device *dwc3;	/* child platform device for DWC3 core */
	struct device *dev;
	void __iomem *base;		/* mapped register space (DBM/QSCRATCH) */
	u32 resource_size;
	int dbm_num_eps;		/* number of usable DBM endpoints */
	u8 ep_num_mapping[DBM_MAX_EPS];	/* DBM ep index -> USB physical ep number */
	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
	struct list_head req_complete_list;	/* saved ->complete callbacks */
	struct clk *ref_clk;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *sleep_clk;
	struct clk *hsphy_sleep_clk;
	struct regulator *hsusb_3p3;	/* HS PHY supplies */
	struct regulator *hsusb_1p8;
	struct regulator *hsusb_vddcx;
	struct regulator *ssusb_1p8;	/* SS PHY supplies */
	struct regulator *ssusb_vddcx;
	enum usb_vdd_type ss_vdd_type;	/* corner vs voltage voting, see vdd_val[] */
	enum usb_vdd_type hs_vdd_type;
	struct dwc3_ext_xceiv ext_xceiv;
	bool resume_pending;		/* resume deferred until PM resume completes */
	atomic_t pm_suspended;
	atomic_t in_lpm;		/* set while in low power mode */
	struct delayed_work resume_work;
	struct wake_lock wlock;
	struct dwc3_charger charger;
	struct usb_phy *otg_xceiv;
	struct delayed_work chg_work;	/* charger-detection state machine */
	enum usb_chg_state chg_state;
	u8 dcd_retries;			/* DCD attempts during charger detection */
};
151
152#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
153#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
154#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
155
156#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
157#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
158#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
159
160#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
161#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
162#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
163
164#define USB_PHY_VDD_DIG_VOL_NONE 0 /* uV */
165#define USB_PHY_VDD_DIG_VOL_MIN 1045000 /* uV */
166#define USB_PHY_VDD_DIG_VOL_MAX 1320000 /* uV */
167
/*
 * VDD_CX voting table indexed by [enum usb_vdd_type][VDD_NONE/MIN/MAX].
 * Row 0 votes RPM corner levels; row 1 votes absolute voltages in uV.
 */
static const int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = {
	{ /* VDD_CX CORNER Voting */
		[VDD_NONE] = RPM_VREG_CORNER_NONE,
		[VDD_MIN] = RPM_VREG_CORNER_NOMINAL,
		[VDD_MAX] = RPM_VREG_CORNER_HIGH,
	},
	{ /* VDD_CX Voltage Voting */
		[VDD_NONE] = USB_PHY_VDD_DIG_VOL_NONE,
		[VDD_MIN] = USB_PHY_VDD_DIG_VOL_MIN,
		[VDD_MAX] = USB_PHY_VDD_DIG_VOL_MAX,
	},
};
180
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300181static struct dwc3_msm *context;
Ido Shayevitzc9e92e92012-05-30 14:36:35 +0300182static u64 dwc3_msm_dma_mask = DMA_BIT_MASK(64);
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300183
184/**
185 *
186 * Read register with debug info.
187 *
188 * @base - DWC3 base virtual address.
189 * @offset - register offset.
190 *
191 * @return u32
192 */
193static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
194{
195 u32 val = ioread32(base + offset);
196 return val;
197}
198
199/**
200 * Read register masked field with debug info.
201 *
202 * @base - DWC3 base virtual address.
203 * @offset - register offset.
204 * @mask - register bitmask.
205 *
206 * @return u32
207 */
208static inline u32 dwc3_msm_read_reg_field(void *base,
209 u32 offset,
210 const u32 mask)
211{
212 u32 shift = find_first_bit((void *)&mask, 32);
213 u32 val = ioread32(base + offset);
214 val &= mask; /* clear other bits */
215 val >>= shift;
216 return val;
217}
218
/**
 *
 * Write register with debug info.
 *
 * Plain full-width 32-bit write; no read-modify-write is performed.
 *
 * @base - DWC3 base virtual address.
 * @offset - register offset.
 * @val - value to write.
 *
 */
static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}
232
233/**
234 * Write register masked field with debug info.
235 *
236 * @base - DWC3 base virtual address.
237 * @offset - register offset.
238 * @mask - register bitmask.
239 * @val - value to write.
240 *
241 */
242static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
243 const u32 mask, u32 val)
244{
245 u32 shift = find_first_bit((void *)&mask, 32);
246 u32 tmp = ioread32(base + offset);
247
248 tmp &= ~mask; /* clear written bits */
249 val = tmp | (val << shift);
250 iowrite32(val, base + offset);
251}
252
253/**
Manu Gautam8c642812012-06-07 10:35:10 +0530254 * Write register and read back masked value to confirm it is written
255 *
256 * @base - DWC3 base virtual address.
257 * @offset - register offset.
258 * @mask - register bitmask specifying what should be updated
259 * @val - value to write.
260 *
261 */
262static inline void dwc3_msm_write_readback(void *base, u32 offset,
263 const u32 mask, u32 val)
264{
265 u32 write_val, tmp = ioread32(base + offset);
266
267 tmp &= ~mask; /* retain other bits */
268 write_val = tmp | val;
269
270 iowrite32(write_val, base + offset);
271
272 /* Read back to see if val was written */
273 tmp = ioread32(base + offset);
274 tmp &= mask; /* clear other bits */
275
276 if (tmp != val)
277 dev_err(context->dev, "%s: write: %x to QSCRATCH: %x FAILED\n",
278 __func__, val, offset);
279}
280
281/**
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300282 * Return DBM EP number according to usb endpoint number.
283 *
284 */
285static int dwc3_msm_find_matching_dbm_ep(u8 usb_ep)
286{
287 int i;
288
289 for (i = 0; i < context->dbm_num_eps; i++)
290 if (context->ep_num_mapping[i] == usb_ep)
291 return i;
292
293 return -ENODEV; /* Not found */
294}
295
296/**
297 * Return number of configured DBM endpoints.
298 *
299 */
300static int dwc3_msm_configured_dbm_ep_num(void)
301{
302 int i;
303 int count = 0;
304
305 for (i = 0; i < context->dbm_num_eps; i++)
306 if (context->ep_num_mapping[i])
307 count++;
308
309 return count;
310}
311
312/**
313 * Configure the DBM with the USB3 core event buffer.
314 * This function is called by the SNPS UDC upon initialization.
315 *
316 * @addr - address of the event buffer.
317 * @size - size of the event buffer.
318 *
319 */
320static int dwc3_msm_event_buffer_config(u32 addr, u16 size)
321{
322 dev_dbg(context->dev, "%s\n", __func__);
323
324 dwc3_msm_write_reg(context->base, DBM_GEVNTADR, addr);
325 dwc3_msm_write_reg_field(context->base, DBM_GEVNTSIZ,
326 DBM_GEVNTSIZ_MASK, size);
327
328 return 0;
329}
330
331/**
332 * Reset the DBM registers upon initialization.
333 *
334 */
Shimrit Malichia00d7322012-08-05 13:56:28 +0300335static int dwc3_msm_dbm_soft_reset(int enter_reset)
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300336{
337 dev_dbg(context->dev, "%s\n", __func__);
Shimrit Malichia00d7322012-08-05 13:56:28 +0300338 if (enter_reset) {
339 dev_dbg(context->dev, "enter DBM reset\n");
340 dwc3_msm_write_reg_field(context->base, DBM_SOFT_RESET,
341 DBM_SFT_RST_MASK, 1);
342 } else {
343 dev_dbg(context->dev, "exit DBM reset\n");
344 dwc3_msm_write_reg_field(context->base, DBM_SOFT_RESET,
345 DBM_SFT_RST_MASK, 0);
346 /*enable DBM*/
347 dwc3_msm_write_reg_field(context->base, QSCRATCH_GENERAL_CFG,
348 DBM_EN_MASK, 0x1);
349 }
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300350
351 return 0;
352}
353
354/**
355 * Soft reset specific DBM ep.
356 * This function is called by the function driver upon events
357 * such as transfer aborting, USB re-enumeration and USB
358 * disconnection.
359 *
360 * @dbm_ep - DBM ep number.
361 * @enter_reset - should we enter a reset state or get out of it.
362 *
363 */
364static int dwc3_msm_dbm_ep_soft_reset(u8 dbm_ep, bool enter_reset)
365{
366 dev_dbg(context->dev, "%s\n", __func__);
367
368 if (dbm_ep >= context->dbm_num_eps) {
369 dev_err(context->dev,
370 "%s: Invalid DBM ep index\n", __func__);
371 return -ENODEV;
372 }
373
374 if (enter_reset) {
375 dwc3_msm_write_reg_field(context->base, DBM_SOFT_RESET,
Shimrit Malichia00d7322012-08-05 13:56:28 +0300376 DBM_SFT_RST_EPS_MASK & 1 << dbm_ep, 1);
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300377 } else {
378 dwc3_msm_write_reg_field(context->base, DBM_SOFT_RESET,
Shimrit Malichia00d7322012-08-05 13:56:28 +0300379 DBM_SFT_RST_EPS_MASK & 1 << dbm_ep, 0);
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300380 }
381
382 return 0;
383}
384
385/**
386 * Configure a USB DBM ep to work in BAM mode.
387 *
388 *
389 * @usb_ep - USB physical EP number.
390 * @producer - producer/consumer.
391 * @disable_wb - disable write back to system memory.
392 * @internal_mem - use internal USB memory for data fifo.
393 * @ioc - enable interrupt on completion.
394 *
395 * @return int - DBM ep number.
396 */
397static int dwc3_msm_dbm_ep_config(u8 usb_ep, u8 bam_pipe,
398 bool producer, bool disable_wb,
399 bool internal_mem, bool ioc)
400{
401 u8 dbm_ep;
Shimrit Malichia00d7322012-08-05 13:56:28 +0300402 u32 ep_cfg;
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300403
404 dev_dbg(context->dev, "%s\n", __func__);
405
Shimrit Malichia00d7322012-08-05 13:56:28 +0300406 dbm_ep = dwc3_msm_find_matching_dbm_ep(usb_ep);
407
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300408 if (dbm_ep < 0) {
Shimrit Malichia00d7322012-08-05 13:56:28 +0300409 dev_err(context->dev,
410 "%s: Invalid usb ep index\n", __func__);
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300411 return -ENODEV;
412 }
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300413 /* First, reset the dbm endpoint */
Shimrit Malichia00d7322012-08-05 13:56:28 +0300414 dwc3_msm_dbm_ep_soft_reset(dbm_ep, 0);
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300415
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300416 /* Set ioc bit for dbm_ep if needed */
417 dwc3_msm_write_reg_field(context->base, DBM_DBG_CNFG,
Shimrit Malichia00d7322012-08-05 13:56:28 +0300418 DBM_ENABLE_IOC_MASK & 1 << dbm_ep, ioc ? 1 : 0);
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300419
Shimrit Malichia00d7322012-08-05 13:56:28 +0300420 ep_cfg = (producer ? DBM_PRODUCER : 0) |
421 (disable_wb ? DBM_DISABLE_WB : 0) |
422 (internal_mem ? DBM_INT_RAM_ACC : 0);
423
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300424 dwc3_msm_write_reg_field(context->base, DBM_EP_CFG(dbm_ep),
Shimrit Malichia00d7322012-08-05 13:56:28 +0300425 DBM_PRODUCER | DBM_DISABLE_WB | DBM_INT_RAM_ACC, ep_cfg >> 8);
426
427 dwc3_msm_write_reg_field(context->base, DBM_EP_CFG(dbm_ep), USB3_EPNUM,
428 usb_ep);
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300429 dwc3_msm_write_reg_field(context->base, DBM_EP_CFG(dbm_ep),
430 DBM_BAM_PIPE_NUM, bam_pipe);
Shimrit Malichia00d7322012-08-05 13:56:28 +0300431 dwc3_msm_write_reg_field(context->base, DBM_PIPE_CFG, 0x000000ff,
432 0xe4);
433 dwc3_msm_write_reg_field(context->base, DBM_EP_CFG(dbm_ep), DBM_EN_EP,
434 1);
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300435
436 return dbm_ep;
437}
438
439/**
440 * Configure a USB DBM ep to work in normal mode.
441 *
442 * @usb_ep - USB ep number.
443 *
444 */
445static int dwc3_msm_dbm_ep_unconfig(u8 usb_ep)
446{
447 u8 dbm_ep;
448
449 dev_dbg(context->dev, "%s\n", __func__);
450
451 dbm_ep = dwc3_msm_find_matching_dbm_ep(usb_ep);
452
453 if (dbm_ep < 0) {
454 dev_err(context->dev,
455 "%s: Invalid usb ep index\n", __func__);
456 return -ENODEV;
457 }
458
459 context->ep_num_mapping[dbm_ep] = 0;
460
461 dwc3_msm_write_reg(context->base, DBM_EP_CFG(dbm_ep), 0);
462
463 /* Reset the dbm endpoint */
464 dwc3_msm_dbm_ep_soft_reset(dbm_ep, true);
465
466 return 0;
467}
468
469/**
470 * Configure the DBM with the BAM's data fifo.
471 * This function is called by the USB BAM Driver
472 * upon initialization.
473 *
474 * @ep - pointer to usb endpoint.
475 * @addr - address of data fifo.
476 * @size - size of data fifo.
477 *
478 */
Shimrit Malichia00d7322012-08-05 13:56:28 +0300479int msm_data_fifo_config(struct usb_ep *ep, u32 addr, u32 size, u8 dst_pipe_idx)
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300480{
481 u8 dbm_ep;
482 struct dwc3_ep *dep = to_dwc3_ep(ep);
Shimrit Malichia00d7322012-08-05 13:56:28 +0300483 u8 bam_pipe = dst_pipe_idx;
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300484
485 dev_dbg(context->dev, "%s\n", __func__);
486
Shimrit Malichia00d7322012-08-05 13:56:28 +0300487 dbm_ep = bam_pipe;
488 context->ep_num_mapping[dbm_ep] = dep->number;
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300489
490 dwc3_msm_write_reg(context->base, DBM_DATA_FIFO(dbm_ep), addr);
491 dwc3_msm_write_reg_field(context->base, DBM_DATA_FIFO_SIZE(dbm_ep),
492 DBM_DATA_FIFO_SIZE_MASK, size);
493
494 return 0;
495}
496
497/**
498* Cleanups for msm endpoint on request complete.
499*
500* Also call original request complete.
501*
502* @usb_ep - pointer to usb_ep instance.
503* @request - pointer to usb_request instance.
504*
505* @return int - 0 on success, negetive on error.
506*/
507static void dwc3_msm_req_complete_func(struct usb_ep *ep,
508 struct usb_request *request)
509{
510 struct dwc3_request *req = to_dwc3_request(request);
511 struct dwc3_ep *dep = to_dwc3_ep(ep);
512 struct dwc3_msm_req_complete *req_complete = NULL;
513
514 /* Find original request complete function and remove it from list */
515 list_for_each_entry(req_complete,
516 &context->req_complete_list,
517 list_item) {
518 if (req_complete->req == request)
519 break;
520 }
521 if (!req_complete || req_complete->req != request) {
522 dev_err(dep->dwc->dev, "%s: could not find the request\n",
523 __func__);
524 return;
525 }
526 list_del(&req_complete->list_item);
527
528 /*
529 * Release another one TRB to the pool since DBM queue took 2 TRBs
530 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
531 * released only one.
532 */
533 if (req->queued)
534 dep->busy_slot++;
535
536 /* Unconfigure dbm ep */
537 dwc3_msm_dbm_ep_unconfig(dep->number);
538
539 /*
540 * If this is the last endpoint we unconfigured, than reset also
541 * the event buffers.
542 */
543 if (0 == dwc3_msm_configured_dbm_ep_num())
544 dwc3_msm_event_buffer_config(0, 0);
545
546 /*
547 * Call original complete function, notice that dwc->lock is already
548 * taken by the caller of this function (dwc3_gadget_giveback()).
549 */
550 request->complete = req_complete->orig_complete;
Shimrit Malichia00d7322012-08-05 13:56:28 +0300551 if (request->complete)
552 request->complete(ep, request);
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300553
554 kfree(req_complete);
555}
556
/**
* Helper function.
* See the header of the dwc3_msm_ep_queue function.
*
* Builds a two-TRB ring (one NORMAL TRB chained to a LINK TRB that points
* back at it) and issues STARTTRANSFER. The statement order here matters:
* the TRBs must be fully written before the command is sent.
*
* @dwc3_ep - pointer to dwc3_ep instance.
* @req - pointer to dwc3_request instance.
*
* @return int - 0 on success, negetive on error.
*/
static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3_trb *trb;
	struct dwc3_trb *trb_link;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret = 0;

	/* We push the request to the dep->req_queued list to indicate that
	 * this request is issued with start transfer. The request will be out
	 * from this list in 2 cases. The first is that the transfer will be
	 * completed (not if the transfer is endless using a circular TRBs with
	 * with link TRB). The second case is an option to do stop stransfer,
	 * this can be initiated by the function driver when calling dequeue.
	 */
	req->queued = true;
	list_add_tail(&req->list, &dep->req_queued);

	/* First, prepare a normal TRB, point to the fake buffer */
	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
	dep->free_slot++;
	memset(trb, 0, sizeof(*trb));

	req->trb = trb;
	/* bph carries DBM control bits instead of a real high address */
	trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
	trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
	trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO | DWC3_TRB_CTRL_CHN;
	req->trb_dma = dwc3_trb_dma_offset(dep, trb);

	/* Second, prepare a Link TRB that points to the first TRB*/
	trb_link = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
	dep->free_slot++;
	memset(trb_link, 0, sizeof *trb_link);

	/* the link target is the first TRB, closing the ring */
	trb_link->bpl = lower_32_bits(req->trb_dma);
	trb_link->bph = DBM_TRB_BIT |
			DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
	trb_link->size = 0;
	trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;

	/*
	 * Now start the transfer
	 */
	memset(&params, 0, sizeof(params));
	params.param0 = 0; /* TDAddr High */
	params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */

	cmd = DWC3_DEPCMD_STARTTRANSFER;
	ret = dwc3_send_gadget_ep_cmd(dep->dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dep->dwc->dev,
			"%s: failed to send STARTTRANSFER command\n",
			__func__);

		/* undo the req_queued bookkeeping done above */
		list_del(&req->list);
		return ret;
	}

	return ret;
}
626
627/**
628* Queue a usb request to the DBM endpoint.
629* This function should be called after the endpoint
630* was enabled by the ep_enable.
631*
632* This function prepares special structure of TRBs which
633* is familier with the DBM HW, so it will possible to use
634* this endpoint in DBM mode.
635*
636* The TRBs prepared by this function, is one normal TRB
637* which point to a fake buffer, followed by a link TRB
638* that points to the first TRB.
639*
640* The API of this function follow the regular API of
641* usb_ep_queue (see usb_ep_ops in include/linuk/usb/gadget.h).
642*
643* @usb_ep - pointer to usb_ep instance.
644* @request - pointer to usb_request instance.
645* @gfp_flags - possible flags.
646*
647* @return int - 0 on success, negetive on error.
648*/
649static int dwc3_msm_ep_queue(struct usb_ep *ep,
650 struct usb_request *request, gfp_t gfp_flags)
651{
652 struct dwc3_request *req = to_dwc3_request(request);
653 struct dwc3_ep *dep = to_dwc3_ep(ep);
654 struct dwc3 *dwc = dep->dwc;
655 struct dwc3_msm_req_complete *req_complete;
656 unsigned long flags;
657 int ret = 0;
658 u8 bam_pipe;
659 bool producer;
660 bool disable_wb;
661 bool internal_mem;
662 bool ioc;
Shimrit Malichia00d7322012-08-05 13:56:28 +0300663 u8 speed;
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300664
665 if (!(request->udc_priv & MSM_SPS_MODE)) {
666 /* Not SPS mode, call original queue */
667 dev_vdbg(dwc->dev, "%s: not sps mode, use regular queue\n",
668 __func__);
669
670 return (context->original_ep_ops[dep->number])->queue(ep,
671 request,
672 gfp_flags);
673 }
674
675 if (!dep->endpoint.desc) {
676 dev_err(dwc->dev,
677 "%s: trying to queue request %p to disabled ep %s\n",
678 __func__, request, ep->name);
679 return -EPERM;
680 }
681
682 if (dep->number == 0 || dep->number == 1) {
683 dev_err(dwc->dev,
684 "%s: trying to queue dbm request %p to control ep %s\n",
685 __func__, request, ep->name);
686 return -EPERM;
687 }
688
689 if (dep->free_slot > 0 || dep->busy_slot > 0 ||
690 !list_empty(&dep->request_list) ||
691 !list_empty(&dep->req_queued)) {
692
693 dev_err(dwc->dev,
694 "%s: trying to queue dbm request %p tp ep %s\n",
695 __func__, request, ep->name);
696 return -EPERM;
697 }
698
699 /*
700 * Override req->complete function, but before doing that,
701 * store it's original pointer in the req_complete_list.
702 */
703 req_complete = kzalloc(sizeof(*req_complete), GFP_KERNEL);
704 if (!req_complete) {
705 dev_err(dep->dwc->dev, "%s: not enough memory\n", __func__);
706 return -ENOMEM;
707 }
708 req_complete->req = request;
709 req_complete->orig_complete = request->complete;
710 list_add_tail(&req_complete->list_item, &context->req_complete_list);
711 request->complete = dwc3_msm_req_complete_func;
712
713 /*
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300714 * Configure the DBM endpoint
715 */
Shimrit Malichia00d7322012-08-05 13:56:28 +0300716 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300717 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
718 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
719 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
720 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
721
722 ret = dwc3_msm_dbm_ep_config(dep->number,
723 bam_pipe, producer,
724 disable_wb, internal_mem, ioc);
725 if (ret < 0) {
726 dev_err(context->dev,
727 "error %d after calling dwc3_msm_dbm_ep_config\n",
728 ret);
729 return ret;
730 }
731
732 dev_vdbg(dwc->dev, "%s: queing request %p to ep %s length %d\n",
733 __func__, request, ep->name, request->length);
734
735 /*
736 * We must obtain the lock of the dwc3 core driver,
737 * including disabling interrupts, so we will be sure
738 * that we are the only ones that configure the HW device
739 * core and ensure that we queuing the request will finish
740 * as soon as possible so we will release back the lock.
741 */
742 spin_lock_irqsave(&dwc->lock, flags);
743 ret = __dwc3_msm_ep_queue(dep, req);
744 spin_unlock_irqrestore(&dwc->lock, flags);
745 if (ret < 0) {
746 dev_err(context->dev,
747 "error %d after calling __dwc3_msm_ep_queue\n", ret);
748 return ret;
749 }
750
Shimrit Malichia00d7322012-08-05 13:56:28 +0300751 speed = dwc3_readl(dwc->regs, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
752 dwc3_msm_write_reg(context->base, DBM_GEN_CFG, speed >> 2);
753
Ido Shayevitz9fb83452012-04-01 17:45:58 +0300754 return 0;
755}
756
757/**
758 * Configure MSM endpoint.
759 * This function do specific configurations
760 * to an endpoint which need specific implementaion
761 * in the MSM architecture.
762 *
763 * This function should be called by usb function/class
764 * layer which need a support from the specific MSM HW
765 * which wrap the USB3 core. (like DBM specific endpoints)
766 *
767 * @ep - a pointer to some usb_ep instance
768 *
769 * @return int - 0 on success, negetive on error.
770 */
771int msm_ep_config(struct usb_ep *ep)
772{
773 struct dwc3_ep *dep = to_dwc3_ep(ep);
774 struct usb_ep_ops *new_ep_ops;
775
776 /* Save original ep ops for future restore*/
777 if (context->original_ep_ops[dep->number]) {
778 dev_err(context->dev,
779 "ep [%s,%d] already configured as msm endpoint\n",
780 ep->name, dep->number);
781 return -EPERM;
782 }
783 context->original_ep_ops[dep->number] = ep->ops;
784
785 /* Set new usb ops as we like */
786 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_KERNEL);
787 if (!new_ep_ops) {
788 dev_err(context->dev,
789 "%s: unable to allocate mem for new usb ep ops\n",
790 __func__);
791 return -ENOMEM;
792 }
793 (*new_ep_ops) = (*ep->ops);
794 new_ep_ops->queue = dwc3_msm_ep_queue;
795 ep->ops = new_ep_ops;
796
797 /*
798 * Do HERE more usb endpoint configurations
799 * which are specific to MSM.
800 */
801
802 return 0;
803}
804EXPORT_SYMBOL(msm_ep_config);
805
806/**
807 * Un-configure MSM endpoint.
808 * Tear down configurations done in the
809 * dwc3_msm_ep_config function.
810 *
811 * @ep - a pointer to some usb_ep instance
812 *
813 * @return int - 0 on success, negetive on error.
814 */
815int msm_ep_unconfig(struct usb_ep *ep)
816{
817 struct dwc3_ep *dep = to_dwc3_ep(ep);
818 struct usb_ep_ops *old_ep_ops;
819
820 /* Restore original ep ops */
821 if (!context->original_ep_ops[dep->number]) {
822 dev_err(context->dev,
823 "ep [%s,%d] was not configured as msm endpoint\n",
824 ep->name, dep->number);
825 return -EINVAL;
826 }
827 old_ep_ops = (struct usb_ep_ops *)ep->ops;
828 ep->ops = context->original_ep_ops[dep->number];
829 context->original_ep_ops[dep->number] = NULL;
830 kfree(old_ep_ops);
831
832 /*
833 * Do HERE more usb endpoint un-configurations
834 * which are specific to MSM.
835 */
836
837 return 0;
838}
839EXPORT_SYMBOL(msm_ep_unconfig);
840
Manu Gautam60e01352012-05-29 09:00:34 +0530841/* HSPHY */
842static int dwc3_hsusb_config_vddcx(int high)
843{
844 int min_vol, ret;
845 struct dwc3_msm *dwc = context;
846 enum usb_vdd_type vdd_type = context->hs_vdd_type;
847 int max_vol = vdd_val[vdd_type][VDD_MAX];
848
849 min_vol = vdd_val[vdd_type][high ? VDD_MIN : VDD_NONE];
850 ret = regulator_set_voltage(dwc->hsusb_vddcx, min_vol, max_vol);
851 if (ret) {
852 dev_err(dwc->dev, "unable to set voltage for HSUSB_VDDCX\n");
853 return ret;
854 }
855
856 dev_dbg(dwc->dev, "%s: min_vol:%d max_vol:%d\n", __func__,
857 min_vol, max_vol);
858
859 return ret;
860}
861
862static int dwc3_hsusb_ldo_init(int init)
863{
864 int rc = 0;
865 struct dwc3_msm *dwc = context;
866
867 if (!init) {
868 regulator_set_voltage(dwc->hsusb_1p8, 0, USB_HSPHY_1P8_VOL_MAX);
869 regulator_set_voltage(dwc->hsusb_3p3, 0, USB_HSPHY_3P3_VOL_MAX);
870 return 0;
871 }
872
873 dwc->hsusb_3p3 = devm_regulator_get(dwc->dev, "HSUSB_3p3");
874 if (IS_ERR(dwc->hsusb_3p3)) {
875 dev_err(dwc->dev, "unable to get hsusb 3p3\n");
876 return PTR_ERR(dwc->hsusb_3p3);
877 }
878
879 rc = regulator_set_voltage(dwc->hsusb_3p3,
880 USB_HSPHY_3P3_VOL_MIN, USB_HSPHY_3P3_VOL_MAX);
881 if (rc) {
882 dev_err(dwc->dev, "unable to set voltage for hsusb 3p3\n");
883 return rc;
884 }
885 dwc->hsusb_1p8 = devm_regulator_get(dwc->dev, "HSUSB_1p8");
886 if (IS_ERR(dwc->hsusb_1p8)) {
887 dev_err(dwc->dev, "unable to get hsusb 1p8\n");
888 rc = PTR_ERR(dwc->hsusb_1p8);
889 goto devote_3p3;
890 }
891 rc = regulator_set_voltage(dwc->hsusb_1p8,
892 USB_HSPHY_1P8_VOL_MIN, USB_HSPHY_1P8_VOL_MAX);
893 if (rc) {
894 dev_err(dwc->dev, "unable to set voltage for hsusb 1p8\n");
895 goto devote_3p3;
896 }
897
898 return 0;
899
900devote_3p3:
901 regulator_set_voltage(dwc->hsusb_3p3, 0, USB_HSPHY_3P3_VOL_MAX);
902
903 return rc;
904}
905
/*
 * Enable (on != 0) or disable (on == 0) the HS PHY LDOs.
 *
 * The labels below form an intentional fall-through teardown chain:
 * disable_regulators -> put_3p3_lpm -> disable_1p8 -> put_1p8_lpm.
 * "on == 0" enters at the top and performs the full teardown; error
 * paths during enable jump into the middle to unwind only what was
 * already brought up. Do not reorder the labels.
 */
static int dwc3_hsusb_ldo_enable(int on)
{
	int rc = 0;
	struct dwc3_msm *dwc = context;

	dev_dbg(dwc->dev, "reg (%s)\n", on ? "HPM" : "LPM");

	if (!on)
		goto disable_regulators;


	rc = regulator_set_optimum_mode(dwc->hsusb_1p8, USB_HSPHY_1P8_HPM_LOAD);
	if (rc < 0) {
		dev_err(dwc->dev, "Unable to set HPM of regulator HSUSB_1p8\n");
		return rc;
	}

	rc = regulator_enable(dwc->hsusb_1p8);
	if (rc) {
		dev_err(dwc->dev, "Unable to enable HSUSB_1p8\n");
		goto put_1p8_lpm;
	}

	rc = regulator_set_optimum_mode(dwc->hsusb_3p3, USB_HSPHY_3P3_HPM_LOAD);
	if (rc < 0) {
		dev_err(dwc->dev, "Unable to set HPM of regulator HSUSB_3p3\n");
		goto disable_1p8;
	}

	rc = regulator_enable(dwc->hsusb_3p3);
	if (rc) {
		dev_err(dwc->dev, "Unable to enable HSUSB_3p3\n");
		goto put_3p3_lpm;
	}

	return 0;

disable_regulators:
	rc = regulator_disable(dwc->hsusb_3p3);
	if (rc)
		dev_err(dwc->dev, "Unable to disable HSUSB_3p3\n");

	/* falls through to drop the 3p3 load vote */
put_3p3_lpm:
	rc = regulator_set_optimum_mode(dwc->hsusb_3p3, 0);
	if (rc < 0)
		dev_err(dwc->dev, "Unable to set LPM of regulator HSUSB_3p3\n");

	/* falls through to disable the 1p8 supply */
disable_1p8:
	rc = regulator_disable(dwc->hsusb_1p8);
	if (rc)
		dev_err(dwc->dev, "Unable to disable HSUSB_1p8\n");

	/* falls through to drop the 1p8 load vote */
put_1p8_lpm:
	rc = regulator_set_optimum_mode(dwc->hsusb_1p8, 0);
	if (rc < 0)
		dev_err(dwc->dev, "Unable to set LPM of regulator HSUSB_1p8\n");

	return rc < 0 ? rc : 0;
}
965
966/* SSPHY */
967static int dwc3_ssusb_config_vddcx(int high)
968{
969 int min_vol, ret;
970 struct dwc3_msm *dwc = context;
971 enum usb_vdd_type vdd_type = context->ss_vdd_type;
972 int max_vol = vdd_val[vdd_type][VDD_MAX];
973
974 min_vol = vdd_val[vdd_type][high ? VDD_MIN : VDD_NONE];
975 ret = regulator_set_voltage(dwc->ssusb_vddcx, min_vol, max_vol);
976 if (ret) {
977 dev_err(dwc->dev, "unable to set voltage for SSUSB_VDDCX\n");
978 return ret;
979 }
980
981 dev_dbg(dwc->dev, "%s: min_vol:%d max_vol:%d\n", __func__,
982 min_vol, max_vol);
983 return ret;
984}
985
986/* 3.3v supply not needed for SS PHY */
987static int dwc3_ssusb_ldo_init(int init)
988{
989 int rc = 0;
990 struct dwc3_msm *dwc = context;
991
992 if (!init) {
993 regulator_set_voltage(dwc->ssusb_1p8, 0, USB_SSPHY_1P8_VOL_MAX);
994 return 0;
995 }
996
997 dwc->ssusb_1p8 = devm_regulator_get(dwc->dev, "SSUSB_1p8");
998 if (IS_ERR(dwc->ssusb_1p8)) {
999 dev_err(dwc->dev, "unable to get ssusb 1p8\n");
1000 return PTR_ERR(dwc->ssusb_1p8);
1001 }
1002 rc = regulator_set_voltage(dwc->ssusb_1p8,
1003 USB_SSPHY_1P8_VOL_MIN, USB_SSPHY_1P8_VOL_MAX);
1004 if (rc)
1005 dev_err(dwc->dev, "unable to set voltage for ssusb 1p8\n");
1006
1007 return rc;
1008}
1009
1010static int dwc3_ssusb_ldo_enable(int on)
1011{
1012 int rc = 0;
1013 struct dwc3_msm *dwc = context;
1014
1015 dev_dbg(context->dev, "reg (%s)\n", on ? "HPM" : "LPM");
1016
1017 if (!on)
1018 goto disable_regulators;
1019
1020
1021 rc = regulator_set_optimum_mode(dwc->ssusb_1p8, USB_SSPHY_1P8_HPM_LOAD);
1022 if (rc < 0) {
1023 dev_err(dwc->dev, "Unable to set HPM of SSUSB_1p8\n");
1024 return rc;
1025 }
1026
1027 rc = regulator_enable(dwc->ssusb_1p8);
1028 if (rc) {
1029 dev_err(dwc->dev, "Unable to enable SSUSB_1p8\n");
1030 goto put_1p8_lpm;
1031 }
1032
1033 return 0;
1034
1035disable_regulators:
1036 rc = regulator_disable(dwc->ssusb_1p8);
1037 if (rc)
1038 dev_err(dwc->dev, "Unable to disable SSUSB_1p8\n");
1039
1040put_1p8_lpm:
1041 rc = regulator_set_optimum_mode(dwc->ssusb_1p8, 0);
1042 if (rc < 0)
1043 dev_err(dwc->dev, "Unable to set LPM of SSUSB_1p8\n");
1044
1045 return rc < 0 ? rc : 0;
1046}
1047
Manu Gautam8c642812012-06-07 10:35:10 +05301048static void dwc3_chg_enable_secondary_det(struct dwc3_msm *mdwc)
1049{
1050 u32 chg_ctrl;
1051
1052 /* Turn off VDP_SRC */
1053 dwc3_msm_write_reg(mdwc->base, CHARGING_DET_CTRL_REG, 0x0);
1054 msleep(20);
1055
1056 /* Before proceeding make sure VDP_SRC is OFF */
1057 chg_ctrl = dwc3_msm_read_reg(mdwc->base, CHARGING_DET_CTRL_REG);
1058 if (chg_ctrl & 0x3F)
1059 dev_err(mdwc->dev, "%s Unable to reset chg_det block: %x\n",
1060 __func__, chg_ctrl);
1061 /*
1062 * Configure DM as current source, DP as current sink
1063 * and enable battery charging comparators.
1064 */
1065 dwc3_msm_write_readback(mdwc->base, CHARGING_DET_CTRL_REG, 0x3F, 0x34);
1066}
1067
1068static bool dwc3_chg_det_check_output(struct dwc3_msm *mdwc)
1069{
1070 u32 chg_det;
1071 bool ret = false;
1072
1073 chg_det = dwc3_msm_read_reg(mdwc->base, CHARGING_DET_OUTPUT_REG);
1074 ret = chg_det & 1;
1075
1076 return ret;
1077}
1078
/*
 * Start the primary charger-detection stage: DP drives the detection
 * current while DM sinks it, with the battery-charging comparators
 * enabled so the output register reflects the result.
 */
static void dwc3_chg_enable_primary_det(struct dwc3_msm *mdwc)
{
	/*
	 * Configure DP as current source, DM as current sink
	 * and enable battery charging comparators.
	 */
	dwc3_msm_write_readback(mdwc->base, CHARGING_DET_CTRL_REG, 0x3F, 0x30);
}
1087
1088static inline bool dwc3_chg_check_dcd(struct dwc3_msm *mdwc)
1089{
1090 u32 chg_state;
1091 bool ret = false;
1092
1093 chg_state = dwc3_msm_read_reg(mdwc->base, CHARGING_DET_OUTPUT_REG);
1094 ret = chg_state & 2;
1095
1096 return ret;
1097}
1098
/* Stop data-contact detection by clearing the charger control bits. */
static inline void dwc3_chg_disable_dcd(struct dwc3_msm *mdwc)
{
	dwc3_msm_write_readback(mdwc->base, CHARGING_DET_CTRL_REG, 0x3F, 0x0);
}
1103
/* Kick off data-contact detection by setting DCDENB in the control reg. */
static inline void dwc3_chg_enable_dcd(struct dwc3_msm *mdwc)
{
	/* Data contact detection enable, DCDENB */
	dwc3_msm_write_readback(mdwc->base, CHARGING_DET_CTRL_REG, 0x3F, 0x2);
}
1109
1110static void dwc3_chg_block_reset(struct dwc3_msm *mdwc)
1111{
1112 u32 chg_ctrl;
1113
1114 /* Clear charger detecting control bits */
1115 dwc3_msm_write_reg(mdwc->base, CHARGING_DET_CTRL_REG, 0x0);
1116
1117 /* Clear alt interrupt latch and enable bits */
1118 dwc3_msm_write_reg(mdwc->base, HS_PHY_IRQ_STAT_REG, 0xFFF);
1119 dwc3_msm_write_reg(mdwc->base, ALT_INTERRUPT_EN_REG, 0x0);
1120
1121 udelay(100);
1122
1123 /* Before proceeding make sure charger block is RESET */
1124 chg_ctrl = dwc3_msm_read_reg(mdwc->base, CHARGING_DET_CTRL_REG);
1125 if (chg_ctrl & 0x3F)
1126 dev_err(mdwc->dev, "%s Unable to reset chg_det block: %x\n",
1127 __func__, chg_ctrl);
1128}
1129
1130static const char *chg_to_string(enum dwc3_chg_type chg_type)
1131{
1132 switch (chg_type) {
1133 case USB_SDP_CHARGER: return "USB_SDP_CHARGER";
1134 case USB_DCP_CHARGER: return "USB_DCP_CHARGER";
1135 case USB_CDP_CHARGER: return "USB_CDP_CHARGER";
1136 default: return "INVALID_CHARGER";
1137 }
1138}
1139
1140#define DWC3_CHG_DCD_POLL_TIME (100 * HZ/1000) /* 100 msec */
1141#define DWC3_CHG_DCD_MAX_RETRIES 6 /* Tdcd_tmout = 6 * 100 msec */
1142#define DWC3_CHG_PRIMARY_DET_TIME (50 * HZ/1000) /* TVDPSRC_ON */
1143#define DWC3_CHG_SECONDARY_DET_TIME (50 * HZ/1000) /* TVDMSRC_ON */
1144
/*
 * Charger-detection state machine, run on system_nrt_wq.
 *
 * Walks UNDEFINED -> WAIT_FOR_DCD -> DCD_DONE -> PRIMARY_DONE ->
 * SECONDARY_DONE -> DETECTED, re-queueing itself with a stage-specific
 * delay between steps, and reports the final charger type through
 * charger.notify_detection_complete().  The trailing case fall-throughs
 * are intentional.
 */
static void dwc3_chg_detect_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, chg_work.work);
	bool is_dcd = false, tmout, vout;
	unsigned long delay;

	dev_dbg(mdwc->dev, "chg detection work\n");
	switch (mdwc->chg_state) {
	case USB_CHG_STATE_UNDEFINED:
		/* Start from a clean block and begin data-contact detect */
		dwc3_chg_block_reset(mdwc);
		dwc3_chg_enable_dcd(mdwc);
		mdwc->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
		mdwc->dcd_retries = 0;
		delay = DWC3_CHG_DCD_POLL_TIME;
		break;
	case USB_CHG_STATE_WAIT_FOR_DCD:
		/* Poll for DCD until it completes or retries run out */
		is_dcd = dwc3_chg_check_dcd(mdwc);
		tmout = ++mdwc->dcd_retries == DWC3_CHG_DCD_MAX_RETRIES;
		if (is_dcd || tmout) {
			dwc3_chg_disable_dcd(mdwc);
			dwc3_chg_enable_primary_det(mdwc);
			delay = DWC3_CHG_PRIMARY_DET_TIME;
			mdwc->chg_state = USB_CHG_STATE_DCD_DONE;
		} else {
			delay = DWC3_CHG_DCD_POLL_TIME;
		}
		break;
	case USB_CHG_STATE_DCD_DONE:
		/* Primary detect: no comparator output => SDP */
		vout = dwc3_chg_det_check_output(mdwc);
		if (vout) {
			dwc3_chg_enable_secondary_det(mdwc);
			delay = DWC3_CHG_SECONDARY_DET_TIME;
			mdwc->chg_state = USB_CHG_STATE_PRIMARY_DONE;
		} else {
			mdwc->charger.chg_type = USB_SDP_CHARGER;
			mdwc->chg_state = USB_CHG_STATE_DETECTED;
			delay = 0;
		}
		break;
	case USB_CHG_STATE_PRIMARY_DONE:
		/* Secondary detect separates DCP from CDP */
		vout = dwc3_chg_det_check_output(mdwc);
		if (vout)
			mdwc->charger.chg_type = USB_DCP_CHARGER;
		else
			mdwc->charger.chg_type = USB_CDP_CHARGER;
		mdwc->chg_state = USB_CHG_STATE_SECONDARY_DONE;
		/* fall through */
	case USB_CHG_STATE_SECONDARY_DONE:
		mdwc->chg_state = USB_CHG_STATE_DETECTED;
		/* fall through */
	case USB_CHG_STATE_DETECTED:
		/* Quiesce the hardware and hand the result to the OTG core */
		dwc3_chg_block_reset(mdwc);
		dev_dbg(mdwc->dev, "chg_type = %s\n",
			chg_to_string(mdwc->charger.chg_type));
		mdwc->charger.notify_detection_complete(mdwc->otg_xceiv->otg,
								&mdwc->charger);
		return;
	default:
		return;
	}

	queue_delayed_work(system_nrt_wq, &mdwc->chg_work, delay);
}
1208
1209static void dwc3_start_chg_det(struct dwc3_charger *charger, bool start)
1210{
1211 struct dwc3_msm *mdwc = context;
1212
1213 if (start == false) {
1214 cancel_delayed_work_sync(&mdwc->chg_work);
1215 mdwc->chg_state = USB_CHG_STATE_UNDEFINED;
1216 charger->chg_type = DWC3_INVALID_CHARGER;
1217 return;
1218 }
1219
1220 mdwc->chg_state = USB_CHG_STATE_UNDEFINED;
1221 charger->chg_type = DWC3_INVALID_CHARGER;
1222 queue_delayed_work(system_nrt_wq, &mdwc->chg_work, 0);
1223}
1224
/*
 * Put the controller into low power mode: gate the iface/core/ref
 * clocks, drop the PHY LDO votes and release the wakelock.  Calling
 * it again while already suspended is a no-op.
 */
static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
{
	dev_dbg(mdwc->dev, "%s: entering lpm\n", __func__);

	if (atomic_read(&mdwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
		return 0;
	}

	/* clock teardown is the reverse of dwc3_msm_resume() */
	clk_disable_unprepare(mdwc->iface_clk);
	clk_disable_unprepare(mdwc->core_clk);
	clk_disable_unprepare(mdwc->ref_clk);
	dwc3_hsusb_ldo_enable(0);
	dwc3_ssusb_ldo_enable(0);
	wake_unlock(&mdwc->wlock);

	atomic_set(&mdwc->in_lpm, 1);
	dev_info(mdwc->dev, "DWC3 in low power mode\n");

	return 0;
}
1246
/*
 * Bring the controller out of low power mode: take the wakelock,
 * ungate ref/core/iface clocks and restore the PHY LDO votes.
 * Calling it while already resumed is a no-op.
 */
static int dwc3_msm_resume(struct dwc3_msm *mdwc)
{
	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);

	if (!atomic_read(&mdwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
		return 0;
	}

	wake_lock(&mdwc->wlock);
	/* clock bring-up is the reverse of dwc3_msm_suspend() */
	clk_prepare_enable(mdwc->ref_clk);
	clk_prepare_enable(mdwc->core_clk);
	clk_prepare_enable(mdwc->iface_clk);
	dwc3_hsusb_ldo_enable(1);
	dwc3_ssusb_ldo_enable(1);

	atomic_set(&mdwc->in_lpm, 0);
	dev_info(mdwc->dev, "DWC3 exited from low power mode\n");

	return 0;
}
1268
/*
 * Deferred resume handler.
 *
 * If the core already left LPM, just forward an XCEIV state event to
 * the OTG machine.  If a system (PM) suspend is still in progress the
 * resume is recorded as pending and finished later from
 * dwc3_msm_pm_resume(); otherwise the device is woken via runtime PM
 * and the OTG machine is told the PHY resumed.
 */
static void dwc3_resume_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
							resume_work.work);

	dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
	/* handle any event that was queued while work was already running */
	if (!atomic_read(&mdwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: notifying xceiv event\n", __func__);
		if (mdwc->otg_xceiv)
			mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
							DWC3_EVENT_XCEIV_STATE);
		return;
	}

	/* bail out if system resume in process, else initiate RESUME */
	if (atomic_read(&mdwc->pm_suspended)) {
		mdwc->resume_pending = true;
	} else {
		/* get_sync triggers dwc3_msm_runtime_resume() */
		pm_runtime_get_sync(mdwc->dev);
		if (mdwc->otg_xceiv)
			mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
							DWC3_EVENT_PHY_RESUME);
		pm_runtime_put_sync(mdwc->dev);
	}
}
1295
1296static bool debug_id, debug_bsv, debug_connect;
1297
1298static int dwc3_connect_show(struct seq_file *s, void *unused)
1299{
1300 if (debug_connect)
1301 seq_printf(s, "true\n");
1302 else
1303 seq_printf(s, "false\n");
1304
1305 return 0;
1306}
1307
/* debugfs open: bind dwc3_connect_show to this file's seq_file. */
static int dwc3_connect_open(struct inode *inode, struct file *file)
{
	return single_open(file, dwc3_connect_show, inode->i_private);
}
1312
1313static ssize_t dwc3_connect_write(struct file *file, const char __user *ubuf,
1314 size_t count, loff_t *ppos)
1315{
1316 struct seq_file *s = file->private_data;
1317 struct dwc3_msm *mdwc = s->private;
1318 char buf[8];
1319
1320 memset(buf, 0x00, sizeof(buf));
1321
1322 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
1323 return -EFAULT;
1324
1325 if (!strncmp(buf, "enable", 6) || !strncmp(buf, "true", 4)) {
1326 debug_connect = true;
1327 } else {
1328 debug_connect = debug_bsv = false;
1329 debug_id = true;
1330 }
1331
1332 mdwc->ext_xceiv.bsv = debug_bsv;
1333 mdwc->ext_xceiv.id = debug_id ? DWC3_ID_FLOAT : DWC3_ID_GROUND;
1334
1335 if (atomic_read(&mdwc->in_lpm)) {
1336 dev_dbg(mdwc->dev, "%s: calling resume_work\n", __func__);
1337 dwc3_resume_work(&mdwc->resume_work.work);
1338 } else {
1339 dev_dbg(mdwc->dev, "%s: notifying xceiv event\n", __func__);
1340 if (mdwc->otg_xceiv)
1341 mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
1342 DWC3_EVENT_XCEIV_STATE);
1343 }
1344
1345 return count;
1346}
1347
/* debugfs "connect" file: read the fake state, write to change it. */
const struct file_operations dwc3_connect_fops = {
	.open = dwc3_connect_open,
	.read = seq_read,
	.write = dwc3_connect_write,
	.llseek = seq_lseek,
	.release = single_release,
};
1355
1356static struct dentry *dwc3_debugfs_root;
1357
1358static void dwc3_debugfs_init(struct dwc3_msm *mdwc)
1359{
1360 dwc3_debugfs_root = debugfs_create_dir("msm_dwc3", NULL);
1361
1362 if (!dwc3_debugfs_root || IS_ERR(dwc3_debugfs_root))
1363 return;
1364
1365 if (!debugfs_create_bool("id", S_IRUGO | S_IWUSR, dwc3_debugfs_root,
1366 (u32 *)&debug_id))
1367 goto error;
1368
1369 if (!debugfs_create_bool("bsv", S_IRUGO | S_IWUSR, dwc3_debugfs_root,
1370 (u32 *)&debug_bsv))
1371 goto error;
1372
1373 if (!debugfs_create_file("connect", S_IRUGO | S_IWUSR,
1374 dwc3_debugfs_root, mdwc, &dwc3_connect_fops))
1375 goto error;
1376
1377 return;
1378
1379error:
1380 debugfs_remove_recursive(dwc3_debugfs_root);
1381}
Manu Gautam8c642812012-06-07 10:35:10 +05301382
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001383static int __devinit dwc3_msm_probe(struct platform_device *pdev)
1384{
1385 struct device_node *node = pdev->dev.of_node;
1386 struct platform_device *dwc3;
1387 struct dwc3_msm *msm;
1388 struct resource *res;
Ido Shayevitz7ad8ded2012-08-28 04:30:58 +03001389 void __iomem *tcsr;
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001390 int ret = 0;
1391
1392 msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
1393 if (!msm) {
1394 dev_err(&pdev->dev, "not enough memory\n");
1395 return -ENOMEM;
1396 }
1397
1398 platform_set_drvdata(pdev, msm);
Ido Shayevitz9fb83452012-04-01 17:45:58 +03001399 context = msm;
Manu Gautam60e01352012-05-29 09:00:34 +05301400 msm->dev = &pdev->dev;
Ido Shayevitz9fb83452012-04-01 17:45:58 +03001401
1402 INIT_LIST_HEAD(&msm->req_complete_list);
Manu Gautam8c642812012-06-07 10:35:10 +05301403 INIT_DELAYED_WORK(&msm->chg_work, dwc3_chg_detect_work);
Manu Gautamb5067272012-07-02 09:53:41 +05301404 INIT_DELAYED_WORK(&msm->resume_work, dwc3_resume_work);
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001405
Manu Gautam1742db22012-06-19 13:33:24 +05301406 /*
1407 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
1408 * run at 125Mhz in SSUSB mode and >60MHZ for HSUSB mode.
1409 */
1410 msm->core_clk = devm_clk_get(&pdev->dev, "core_clk");
1411 if (IS_ERR(msm->core_clk)) {
1412 dev_err(&pdev->dev, "failed to get core_clk\n");
1413 return PTR_ERR(msm->core_clk);
1414 }
1415 clk_set_rate(msm->core_clk, 125000000);
1416 clk_prepare_enable(msm->core_clk);
1417
Manu Gautam3e9ad352012-08-16 14:44:47 -07001418 msm->iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
1419 if (IS_ERR(msm->iface_clk)) {
1420 dev_err(&pdev->dev, "failed to get iface_clk\n");
1421 ret = PTR_ERR(msm->iface_clk);
1422 goto disable_core_clk;
1423 }
1424 clk_prepare_enable(msm->iface_clk);
1425
1426 msm->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
1427 if (IS_ERR(msm->sleep_clk)) {
1428 dev_err(&pdev->dev, "failed to get sleep_clk\n");
1429 ret = PTR_ERR(msm->sleep_clk);
1430 goto disable_iface_clk;
1431 }
1432 clk_prepare_enable(msm->sleep_clk);
1433
1434 msm->hsphy_sleep_clk = devm_clk_get(&pdev->dev, "sleep_a_clk");
1435 if (IS_ERR(msm->hsphy_sleep_clk)) {
1436 dev_err(&pdev->dev, "failed to get sleep_a_clk\n");
1437 ret = PTR_ERR(msm->hsphy_sleep_clk);
1438 goto disable_sleep_clk;
1439 }
1440 clk_prepare_enable(msm->hsphy_sleep_clk);
1441
1442 msm->ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
1443 if (IS_ERR(msm->ref_clk)) {
1444 dev_err(&pdev->dev, "failed to get ref_clk\n");
1445 ret = PTR_ERR(msm->ref_clk);
1446 goto disable_sleep_a_clk;
1447 }
1448 clk_prepare_enable(msm->ref_clk);
1449
Manu Gautam60e01352012-05-29 09:00:34 +05301450 /* SS PHY */
1451 msm->ss_vdd_type = VDDCX_CORNER;
1452 msm->ssusb_vddcx = devm_regulator_get(&pdev->dev, "ssusb_vdd_dig");
1453 if (IS_ERR(msm->ssusb_vddcx)) {
1454 msm->ssusb_vddcx = devm_regulator_get(&pdev->dev,
1455 "SSUSB_VDDCX");
1456 if (IS_ERR(msm->ssusb_vddcx)) {
1457 dev_err(&pdev->dev, "unable to get ssusb vddcx\n");
Manu Gautam1742db22012-06-19 13:33:24 +05301458 ret = PTR_ERR(msm->ssusb_vddcx);
Manu Gautam3e9ad352012-08-16 14:44:47 -07001459 goto disable_ref_clk;
Manu Gautam60e01352012-05-29 09:00:34 +05301460 }
1461 msm->ss_vdd_type = VDDCX;
1462 dev_dbg(&pdev->dev, "ss_vdd_type: VDDCX\n");
1463 }
1464
1465 ret = dwc3_ssusb_config_vddcx(1);
1466 if (ret) {
1467 dev_err(&pdev->dev, "ssusb vddcx configuration failed\n");
Manu Gautam3e9ad352012-08-16 14:44:47 -07001468 goto disable_ref_clk;
Manu Gautam60e01352012-05-29 09:00:34 +05301469 }
1470
1471 ret = regulator_enable(context->ssusb_vddcx);
1472 if (ret) {
1473 dev_err(&pdev->dev, "unable to enable the ssusb vddcx\n");
1474 goto unconfig_ss_vddcx;
1475 }
1476
1477 ret = dwc3_ssusb_ldo_init(1);
1478 if (ret) {
1479 dev_err(&pdev->dev, "ssusb vreg configuration failed\n");
1480 goto disable_ss_vddcx;
1481 }
1482
1483 ret = dwc3_ssusb_ldo_enable(1);
1484 if (ret) {
1485 dev_err(&pdev->dev, "ssusb vreg enable failed\n");
1486 goto free_ss_ldo_init;
1487 }
1488
1489 /* HS PHY */
1490 msm->hs_vdd_type = VDDCX_CORNER;
1491 msm->hsusb_vddcx = devm_regulator_get(&pdev->dev, "hsusb_vdd_dig");
1492 if (IS_ERR(msm->hsusb_vddcx)) {
1493 msm->hsusb_vddcx = devm_regulator_get(&pdev->dev,
1494 "HSUSB_VDDCX");
1495 if (IS_ERR(msm->hsusb_vddcx)) {
1496 dev_err(&pdev->dev, "unable to get hsusb vddcx\n");
1497 ret = PTR_ERR(msm->ssusb_vddcx);
1498 goto disable_ss_ldo;
1499 }
1500 msm->hs_vdd_type = VDDCX;
1501 dev_dbg(&pdev->dev, "hs_vdd_type: VDDCX\n");
1502 }
1503
1504 ret = dwc3_hsusb_config_vddcx(1);
1505 if (ret) {
1506 dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
1507 goto disable_ss_ldo;
1508 }
1509
1510 ret = regulator_enable(context->hsusb_vddcx);
1511 if (ret) {
1512 dev_err(&pdev->dev, "unable to enable the hsusb vddcx\n");
1513 goto unconfig_hs_vddcx;
1514 }
1515
1516 ret = dwc3_hsusb_ldo_init(1);
1517 if (ret) {
1518 dev_err(&pdev->dev, "hsusb vreg configuration failed\n");
1519 goto disable_hs_vddcx;
1520 }
1521
1522 ret = dwc3_hsusb_ldo_enable(1);
1523 if (ret) {
1524 dev_err(&pdev->dev, "hsusb vreg enable failed\n");
1525 goto free_hs_ldo_init;
1526 }
1527
Ido Shayevitz7ad8ded2012-08-28 04:30:58 +03001528 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1529 if (!res) {
1530 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
1531 } else {
1532 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
1533 resource_size(res));
1534 if (!tcsr) {
1535 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
1536 } else {
1537 /* Enable USB3 on the primary USB port. */
1538 writel_relaxed(0x1, tcsr);
1539 /*
1540 * Ensure that TCSR write is completed before
1541 * USB registers initialization.
1542 */
1543 mb();
1544 }
1545 }
1546
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001547 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1548 if (!res) {
1549 dev_err(&pdev->dev, "missing memory base resource\n");
Manu Gautam60e01352012-05-29 09:00:34 +05301550 ret = -ENODEV;
1551 goto disable_hs_ldo;
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001552 }
1553
1554 msm->base = devm_ioremap_nocache(&pdev->dev, res->start,
1555 resource_size(res));
1556 if (!msm->base) {
1557 dev_err(&pdev->dev, "ioremap failed\n");
Manu Gautam60e01352012-05-29 09:00:34 +05301558 ret = -ENODEV;
1559 goto disable_hs_ldo;
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001560 }
1561
Ido Shayevitzca2691e2012-04-17 15:54:53 +03001562 dwc3 = platform_device_alloc("dwc3", -1);
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001563 if (!dwc3) {
1564 dev_err(&pdev->dev, "couldn't allocate dwc3 device\n");
Manu Gautam60e01352012-05-29 09:00:34 +05301565 ret = -ENODEV;
1566 goto disable_hs_ldo;
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001567 }
1568
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001569 dwc3->dev.parent = &pdev->dev;
Ido Shayevitzc9e92e92012-05-30 14:36:35 +03001570 dwc3->dev.coherent_dma_mask = DMA_BIT_MASK(32);
1571 dwc3->dev.dma_mask = &dwc3_msm_dma_mask;
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001572 dwc3->dev.dma_parms = pdev->dev.dma_parms;
1573 msm->resource_size = resource_size(res);
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001574 msm->dwc3 = dwc3;
1575
Manu Gautamb5067272012-07-02 09:53:41 +05301576 pm_runtime_set_active(msm->dev);
1577 pm_runtime_enable(msm->dev);
1578
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001579 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-dbm-eps",
1580 &msm->dbm_num_eps)) {
1581 dev_err(&pdev->dev,
1582 "unable to read platform data num of dbm eps\n");
1583 msm->dbm_num_eps = DBM_MAX_EPS;
1584 }
1585
1586 if (msm->dbm_num_eps > DBM_MAX_EPS) {
1587 dev_err(&pdev->dev,
1588 "Driver doesn't support number of DBM EPs. "
1589 "max: %d, dbm_num_eps: %d\n",
1590 DBM_MAX_EPS, msm->dbm_num_eps);
1591 ret = -ENODEV;
Manu Gautam60e01352012-05-29 09:00:34 +05301592 goto put_pdev;
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001593 }
1594
1595 ret = platform_device_add_resources(dwc3, pdev->resource,
1596 pdev->num_resources);
1597 if (ret) {
1598 dev_err(&pdev->dev, "couldn't add resources to dwc3 device\n");
Manu Gautam60e01352012-05-29 09:00:34 +05301599 goto put_pdev;
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001600 }
1601
1602 ret = platform_device_add(dwc3);
1603 if (ret) {
1604 dev_err(&pdev->dev, "failed to register dwc3 device\n");
Manu Gautam60e01352012-05-29 09:00:34 +05301605 goto put_pdev;
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001606 }
1607
Ido Shayevitz9fb83452012-04-01 17:45:58 +03001608 /* Reset the DBM */
Shimrit Malichia00d7322012-08-05 13:56:28 +03001609 dwc3_msm_dbm_soft_reset(1);
1610 usleep_range(1000, 1200);
1611 dwc3_msm_dbm_soft_reset(0);
1612
1613 dwc3_msm_event_buffer_config(dwc3_readl(msm->base, DWC3_GEVNTADRLO(0)),
1614 dwc3_readl(msm->base, DWC3_GEVNTSIZ(0)));
Ido Shayevitz9fb83452012-04-01 17:45:58 +03001615
Manu Gautam8c642812012-06-07 10:35:10 +05301616 msm->otg_xceiv = usb_get_transceiver();
1617 if (msm->otg_xceiv) {
1618 msm->charger.start_detection = dwc3_start_chg_det;
1619 ret = dwc3_set_charger(msm->otg_xceiv->otg, &msm->charger);
1620 if (ret || !msm->charger.notify_detection_complete) {
1621 dev_err(&pdev->dev, "failed to register charger: %d\n",
1622 ret);
1623 goto put_xcvr;
1624 }
Manu Gautamb5067272012-07-02 09:53:41 +05301625
1626 ret = dwc3_set_ext_xceiv(msm->otg_xceiv->otg, &msm->ext_xceiv);
1627 if (ret || !msm->ext_xceiv.notify_ext_events) {
1628 dev_err(&pdev->dev, "failed to register xceiver: %d\n",
1629 ret);
1630 goto put_xcvr;
1631 }
Manu Gautam8c642812012-06-07 10:35:10 +05301632 } else {
1633 dev_err(&pdev->dev, "%s: No OTG transceiver found\n", __func__);
1634 }
1635
Manu Gautamb5067272012-07-02 09:53:41 +05301636 wake_lock_init(&msm->wlock, WAKE_LOCK_SUSPEND, "msm_dwc3");
1637 wake_lock(&msm->wlock);
1638 dwc3_debugfs_init(msm);
1639
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001640 return 0;
1641
Manu Gautam8c642812012-06-07 10:35:10 +05301642put_xcvr:
1643 usb_put_transceiver(msm->otg_xceiv);
1644 platform_device_del(dwc3);
Manu Gautam60e01352012-05-29 09:00:34 +05301645put_pdev:
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001646 platform_device_put(dwc3);
Manu Gautam60e01352012-05-29 09:00:34 +05301647disable_hs_ldo:
1648 dwc3_hsusb_ldo_enable(0);
1649free_hs_ldo_init:
1650 dwc3_hsusb_ldo_init(0);
1651disable_hs_vddcx:
1652 regulator_disable(context->hsusb_vddcx);
1653unconfig_hs_vddcx:
1654 dwc3_hsusb_config_vddcx(0);
1655disable_ss_ldo:
1656 dwc3_ssusb_ldo_enable(0);
1657free_ss_ldo_init:
1658 dwc3_ssusb_ldo_init(0);
1659disable_ss_vddcx:
1660 regulator_disable(context->ssusb_vddcx);
1661unconfig_ss_vddcx:
1662 dwc3_ssusb_config_vddcx(0);
Manu Gautam3e9ad352012-08-16 14:44:47 -07001663disable_ref_clk:
1664 clk_disable_unprepare(msm->ref_clk);
1665disable_sleep_a_clk:
1666 clk_disable_unprepare(msm->hsphy_sleep_clk);
1667disable_sleep_clk:
1668 clk_disable_unprepare(msm->sleep_clk);
1669disable_iface_clk:
1670 clk_disable_unprepare(msm->iface_clk);
Manu Gautam1742db22012-06-19 13:33:24 +05301671disable_core_clk:
1672 clk_disable_unprepare(msm->core_clk);
Ido Shayevitzef72ddd2012-03-28 18:55:55 +02001673
1674 return ret;
1675}
1676
/*
 * Driver unbind: tear down in reverse order of probe — debugfs,
 * charger detection / OTG transceiver reference, runtime PM, the
 * child dwc3 core device, wakelock, PHY supplies, and finally clocks.
 */
static int __devexit dwc3_msm_remove(struct platform_device *pdev)
{
	struct dwc3_msm *msm = platform_get_drvdata(pdev);

	if (dwc3_debugfs_root)
		debugfs_remove_recursive(dwc3_debugfs_root);
	if (msm->otg_xceiv) {
		/* stop in-flight charger detection before dropping xceiv */
		dwc3_start_chg_det(&msm->charger, false);
		usb_put_transceiver(msm->otg_xceiv);
	}
	pm_runtime_disable(msm->dev);
	platform_device_unregister(msm->dwc3);
	wake_lock_destroy(&msm->wlock);

	dwc3_hsusb_ldo_enable(0);
	dwc3_hsusb_ldo_init(0);
	regulator_disable(msm->hsusb_vddcx);
	dwc3_hsusb_config_vddcx(0);
	dwc3_ssusb_ldo_enable(0);
	dwc3_ssusb_ldo_init(0);
	regulator_disable(msm->ssusb_vddcx);
	dwc3_ssusb_config_vddcx(0);
	clk_disable_unprepare(msm->core_clk);
	clk_disable_unprepare(msm->iface_clk);
	clk_disable_unprepare(msm->sleep_clk);
	clk_disable_unprepare(msm->hsphy_sleep_clk);
	clk_disable_unprepare(msm->ref_clk);

	return 0;
}
1707
Manu Gautamb5067272012-07-02 09:53:41 +05301708static int dwc3_msm_pm_suspend(struct device *dev)
1709{
1710 int ret = 0;
1711 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
1712
1713 dev_dbg(dev, "dwc3-msm PM suspend\n");
1714
1715 ret = dwc3_msm_suspend(mdwc);
1716 if (!ret)
1717 atomic_set(&mdwc->pm_suspended, 1);
1718
1719 return ret;
1720}
1721
/*
 * System sleep exit: clear the pm_suspended flag and, if a wakeup
 * arrived while suspended (resume_pending), finish the deferred
 * resume — wake the hardware, resync the runtime-PM status to
 * "active", and notify the OTG machine so it rebalances its PM count.
 */
static int dwc3_msm_pm_resume(struct device *dev)
{
	int ret = 0;
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);

	dev_dbg(dev, "dwc3-msm PM resume\n");

	atomic_set(&mdwc->pm_suspended, 0);
	if (mdwc->resume_pending) {
		mdwc->resume_pending = false;

		ret = dwc3_msm_resume(mdwc);
		/* Update runtime PM status */
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);

		/* Let OTG know about resume event and update pm_count */
		if (mdwc->otg_xceiv)
			mdwc->ext_xceiv.notify_ext_events(mdwc->otg_xceiv->otg,
							DWC3_EVENT_PHY_RESUME);
	}

	return ret;
}
1747
/* Runtime-PM idle callback: nothing to veto, allow autosuspend. */
static int dwc3_msm_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "DWC3-msm runtime idle\n");

	return 0;
}
1754
/* Runtime-PM suspend callback: enter low power mode. */
static int dwc3_msm_runtime_suspend(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);

	dev_dbg(dev, "DWC3-msm runtime suspend\n");

	return dwc3_msm_suspend(mdwc);
}
1763
/* Runtime-PM resume callback: leave low power mode. */
static int dwc3_msm_runtime_resume(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);

	dev_dbg(dev, "DWC3-msm runtime resume\n");

	return dwc3_msm_resume(mdwc);
}
1772
/* System-sleep and runtime PM callbacks for the glue device. */
static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
	SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
				dwc3_msm_runtime_idle)
};
1778
/*
 * Device-tree match table.
 * NOTE(review): "matach" is a typo for "match"; renaming it requires
 * updating the .of_match_table reference in dwc3_msm_driver as well.
 */
static const struct of_device_id of_dwc3_matach[] = {
	{
		.compatible = "qcom,dwc-usb3-msm",
	},
	{ },
};
MODULE_DEVICE_TABLE(of, of_dwc3_matach);
1786
/* Platform driver glue; bound via the DT compatible above. */
static struct platform_driver dwc3_msm_driver = {
	.probe = dwc3_msm_probe,
	.remove = __devexit_p(dwc3_msm_remove),
	.driver = {
		.name = "msm-dwc3",
		.pm = &dwc3_msm_dev_pm_ops,
		.of_match_table = of_dwc3_matach,
	},
};
1796
1797MODULE_LICENSE("GPLV2");
1798MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
1799
1800static int __devinit dwc3_msm_init(void)
1801{
1802 return platform_driver_register(&dwc3_msm_driver);
1803}
1804module_init(dwc3_msm_init);
1805
/* Module exit point: unregister the platform driver. */
static void __exit dwc3_msm_exit(void)
{
	platform_driver_unregister(&dwc3_msm_driver);
}
module_exit(dwc3_msm_exit);