blob: 18e55d988bc5fe2e27e479c416e2889cfbdd68f5 [file] [log] [blame]
Mayank Rana511f3b22016-08-02 12:00:11 -07001/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/clk.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/delay.h>
30#include <linux/of.h>
31#include <linux/of_platform.h>
32#include <linux/of_gpio.h>
33#include <linux/list.h>
34#include <linux/uaccess.h>
35#include <linux/usb/ch9.h>
36#include <linux/usb/gadget.h>
37#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070038#include <linux/regulator/consumer.h>
39#include <linux/pm_wakeup.h>
40#include <linux/power_supply.h>
41#include <linux/cdev.h>
42#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070043#include <linux/msm-bus.h>
44#include <linux/irq.h>
45#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053046#include <linux/reset.h>
Hemant Kumar633dc332016-08-10 13:41:05 -070047#include <linux/clk/qcom.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070048
49#include "power.h"
50#include "core.h"
51#include "gadget.h"
52#include "dbm.h"
53#include "debug.h"
54#include "xhci.h"
55
56/* time out to wait for USB cable status notification (in ms)*/
57#define SM_INIT_TIMEOUT 30000
58
59/* AHB2PHY register offsets */
60#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
61
/* AHB2PHY read/write wait value */
63#define ONE_READ_WRITE_WAIT 0x11
64
65/* cpu to fix usb interrupt */
66static int cpu_to_affin;
67module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
68MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
69
70/* XHCI registers */
71#define USB3_HCSPARAMS1 (0x4)
72#define USB3_PORTSC (0x420)
73
74/**
75 * USB QSCRATCH Hardware registers
76 *
77 */
78#define QSCRATCH_REG_OFFSET (0x000F8800)
79#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
80#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
81#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
82#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
83
84#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
85#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
86#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
87#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
88#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
89
90/* QSCRATCH_GENERAL_CFG register bit offset */
91#define PIPE_UTMI_CLK_SEL BIT(0)
92#define PIPE3_PHYSTATUS_SW BIT(3)
93#define PIPE_UTMI_CLK_DIS BIT(8)
94
95#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
96#define UTMI_OTG_VBUS_VALID BIT(20)
97#define SW_SESSVLD_SEL BIT(28)
98
99#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
100#define LANE0_PWR_PRESENT BIT(24)
101
102/* GSI related registers */
103#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
104#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
105
106#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
107#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
108#define GSI_CLK_EN_MASK BIT(12)
109#define BLOCK_GSI_WR_GO_MASK BIT(1)
110#define GSI_EN_MASK BIT(0)
111
112#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
113#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
114#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
115#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
116
117#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
118#define GSI_WR_CTRL_STATE_MASK BIT(15)
119
/*
 * Bookkeeping entry used to restore a request's original ->complete
 * callback after it was overridden for DBM mode; linked on
 * dwc3_msm::req_complete_list by dwc3_msm_ep_queue() and consumed by
 * dwc3_msm_req_complete_func().
 */
struct dwc3_msm_req_complete {
	struct list_head list_item;
	struct usb_request *req;
	/* function driver's original completion callback */
	void (*orig_complete)(struct usb_ep *ep,
				struct usb_request *req);
};
126
/* ID pin state: grounded selects host role, floating selects peripheral */
enum dwc3_id_state {
	DWC3_ID_GROUND = 0,
	DWC3_ID_FLOAT,
};
131
132/* for type c cable */
/* Type-C cable plug orientation (which CC line is active) */
enum plug_orientation {
	ORIENTATION_NONE,
	ORIENTATION_CC1,
	ORIENTATION_CC2,
};
138
139/* Input bits to state machine (mdwc->inputs) */
140
141#define ID 0
142#define B_SESS_VLD 1
143#define B_SUSPEND 2
144
/*
 * struct dwc3_msm - per-instance state of the MSM DWC3 glue driver.
 *
 * Wraps the core dwc3 platform device with MSM-specific clock, regulator,
 * PHY, bus-voting, DBM and dual-role (extcon) handling.
 */
struct dwc3_msm {
	struct device *dev;
	void __iomem *base;		/* DWC3 core + QSCRATCH register space */
	void __iomem *ahb2phy_base;
	struct platform_device	*dwc3;	/* child dwc3 core device */
	/* saved ep ops so DBM-mode overrides can fall back to the originals */
	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
	struct list_head req_complete_list;	/* dwc3_msm_req_complete entries */
	struct clk		*xo_clk;
	struct clk		*core_clk;
	long			core_clk_rate;
	struct clk		*iface_clk;
	struct clk		*sleep_clk;
	struct clk		*utmi_clk;
	unsigned int		utmi_clk_rate;
	struct clk		*utmi_clk_src;
	struct clk		*bus_aggr_clk;
	struct clk		*cfg_ahb_clk;
	struct reset_control	*core_reset;
	struct regulator	*dwc3_gdsc;

	struct usb_phy		*hs_phy, *ss_phy;

	struct dbm		*dbm;

	/* VBUS regulator for host mode */
	struct regulator	*vbus_reg;
	int			vbus_retry_count;
	bool			resume_pending;
	atomic_t                pm_suspended;
	int			hs_phy_irq;
	int			ss_phy_irq;
	struct work_struct	resume_work;
	struct work_struct	restart_usb_work;
	bool			in_restart;
	struct workqueue_struct *dwc3_wq;
	struct delayed_work	sm_work;	/* dual-role state machine */
	unsigned long		inputs;		/* ID/B_SESS_VLD/B_SUSPEND bits */
	unsigned		max_power;
	bool			charging_disabled;
	enum usb_otg_state	otg_state;
	struct work_struct	bus_vote_w;
	unsigned int		bus_vote;
	u32			bus_perf_client;
	struct msm_bus_scale_pdata	*bus_scale_table;
	struct power_supply	*usb_psy;
	struct work_struct	vbus_draw_work;
	bool			in_host_mode;
	unsigned int		tx_fifo_size;
	bool			vbus_active;
	bool			suspend;
	bool			disable_host_mode_pm;
	enum dwc3_id_state	id_state;
	unsigned long		lpm_flags;	/* low-power-mode status bits */
#define MDWC3_SS_PHY_SUSPEND		BIT(0)
#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY	BIT(1)
#define MDWC3_POWER_COLLAPSE		BIT(2)

	unsigned int		irq_to_affin;	/* see cpu_to_affin modparam */
	struct notifier_block	dwc3_cpu_notifier;

	struct extcon_dev	*extcon_vbus;
	struct extcon_dev	*extcon_id;
	struct notifier_block	vbus_nb;
	struct notifier_block	id_nb;

	int			pwr_event_irq;
	atomic_t                in_p3;		/* SS PHY in P3 power state */
	unsigned int		lpm_to_suspend_delay;
	bool			init;
	enum plug_orientation	typec_orientation;
};
216
217#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
218#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
219#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
220
221#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
222#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
223#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
224
225#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
226#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
227#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
228
229#define DSTS_CONNECTSPD_SS 0x4
230
231
232static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
233static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
234
235/**
236 *
237 * Read register with debug info.
238 *
239 * @base - DWC3 base virtual address.
240 * @offset - register offset.
241 *
242 * @return u32
243 */
244static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
245{
246 u32 val = ioread32(base + offset);
247 return val;
248}
249
250/**
251 * Read register masked field with debug info.
252 *
253 * @base - DWC3 base virtual address.
254 * @offset - register offset.
255 * @mask - register bitmask.
256 *
257 * @return u32
258 */
259static inline u32 dwc3_msm_read_reg_field(void *base,
260 u32 offset,
261 const u32 mask)
262{
263 u32 shift = find_first_bit((void *)&mask, 32);
264 u32 val = ioread32(base + offset);
265
266 val &= mask; /* clear other bits */
267 val >>= shift;
268 return val;
269}
270
/**
 * dwc3_msm_write_reg - write a 32-bit register.
 *
 * @base - DWC3 base virtual address.
 * @offset - register offset.
 * @val - value to write.
 */
static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}
284
285/**
286 * Write register masked field with debug info.
287 *
288 * @base - DWC3 base virtual address.
289 * @offset - register offset.
290 * @mask - register bitmask.
291 * @val - value to write.
292 *
293 */
294static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
295 const u32 mask, u32 val)
296{
297 u32 shift = find_first_bit((void *)&mask, 32);
298 u32 tmp = ioread32(base + offset);
299
300 tmp &= ~mask; /* clear written bits */
301 val = tmp | (val << shift);
302 iowrite32(val, base + offset);
303}
304
305/**
306 * Write register and read back masked value to confirm it is written
307 *
308 * @base - DWC3 base virtual address.
309 * @offset - register offset.
310 * @mask - register bitmask specifying what should be updated
311 * @val - value to write.
312 *
313 */
314static inline void dwc3_msm_write_readback(void *base, u32 offset,
315 const u32 mask, u32 val)
316{
317 u32 write_val, tmp = ioread32(base + offset);
318
319 tmp &= ~mask; /* retain other bits */
320 write_val = tmp | val;
321
322 iowrite32(write_val, base + offset);
323
324 /* Read back to see if val was written */
325 tmp = ioread32(base + offset);
326 tmp &= mask; /* clear other bits */
327
328 if (tmp != val)
329 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
330 __func__, val, offset);
331}
332
333static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
334{
335 int i, num_ports;
336 u32 reg;
337
338 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
339 num_ports = HCS_MAX_PORTS(reg);
340
341 for (i = 0; i < num_ports; i++) {
342 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
343 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
344 return true;
345 }
346
347 return false;
348}
349
350static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
351{
352 u8 speed;
353
354 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
355 return !!(speed & DSTS_CONNECTSPD_SS);
356}
357
358static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
359{
360 if (mdwc->in_host_mode)
361 return dwc3_msm_is_host_superspeed(mdwc);
362
363 return dwc3_msm_is_dev_superspeed(mdwc);
364}
365
366#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
367/**
368 * Configure the DBM with the BAM's data fifo.
369 * This function is called by the USB BAM Driver
370 * upon initialization.
371 *
372 * @ep - pointer to usb endpoint.
373 * @addr - address of data fifo.
374 * @size - size of data fifo.
375 *
376 */
377int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
378 u32 size, u8 dst_pipe_idx)
379{
380 struct dwc3_ep *dep = to_dwc3_ep(ep);
381 struct dwc3 *dwc = dep->dwc;
382 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
383
384 dev_dbg(mdwc->dev, "%s\n", __func__);
385
386 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
387 dst_pipe_idx);
388}
389
390
/**
 * dwc3_msm_req_complete_func - completion hook for DBM-mode requests.
 *
 * Installed in place of the function driver's ->complete by
 * dwc3_msm_ep_queue(). Undoes the MSM/DBM bookkeeping for the request
 * and then invokes the original completion callback.
 *
 * @ep - pointer to usb_ep instance.
 * @request - pointer to usb_request instance.
 */
static void dwc3_msm_req_complete_func(struct usb_ep *ep,
				       struct usb_request *request)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_msm_req_complete *req_complete = NULL;

	/* Find original request complete function and remove it from list */
	list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
		if (req_complete->req == request)
			break;
	}
	/*
	 * NOTE(review): if the entry is not found, list_for_each_entry()
	 * leaves req_complete pointing at the container of the list head
	 * rather than NULL, so the ->req comparison below is the check
	 * that actually catches the "not found" case.
	 */
	if (!req_complete || req_complete->req != request) {
		dev_err(dep->dwc->dev, "%s: could not find the request\n",
					__func__);
		return;
	}
	list_del(&req_complete->list_item);

	/*
	 * Release another one TRB to the pool since DBM queue took 2 TRBs
	 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
	 * released only one.
	 */
	dep->trb_dequeue++;

	/* Unconfigure dbm ep */
	dbm_ep_unconfig(mdwc->dbm, dep->number);

	/*
	 * If this is the last endpoint we unconfigured, than reset also
	 * the event buffers; unless unconfiguring the ep due to lpm,
	 * in which case the event buffer only gets reset during the
	 * block reset.
	 */
	if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
		!dbm_reset_ep_after_lpm(mdwc->dbm))
		dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);

	/*
	 * Call original complete function, notice that dwc->lock is already
	 * taken by the caller of this function (dwc3_gadget_giveback()).
	 */
	request->complete = req_complete->orig_complete;
	if (request->complete)
		request->complete(ep, request);

	kfree(req_complete);
}
451
452
453/**
454* Helper function
455*
456* Reset DBM endpoint.
457*
458* @mdwc - pointer to dwc3_msm instance.
459* @dep - pointer to dwc3_ep instance.
460*
461* @return int - 0 on success, negative on error.
462*/
463static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
464{
465 int ret;
466
467 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
468
469 /* Reset the dbm endpoint */
470 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
471 if (ret) {
472 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
473 __func__);
474 return ret;
475 }
476
477 /*
478 * The necessary delay between asserting and deasserting the dbm ep
479 * reset is based on the number of active endpoints. If there is more
480 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
481 * delay will suffice.
482 */
483 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
484 usleep_range(1000, 1200);
485 else
486 udelay(10);
487 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
488 if (ret) {
489 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
490 __func__);
491 return ret;
492 }
493
494 return 0;
495}
496
497/**
498* Reset the DBM endpoint which is linked to the given USB endpoint.
499*
500* @usb_ep - pointer to usb_ep instance.
501*
502* @return int - 0 on success, negative on error.
503*/
504
505int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
506{
507 struct dwc3_ep *dep = to_dwc3_ep(ep);
508 struct dwc3 *dwc = dep->dwc;
509 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
510
511 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
512}
513EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
514
515
516/**
517* Helper function.
518* See the header of the dwc3_msm_ep_queue function.
519*
520* @dwc3_ep - pointer to dwc3_ep instance.
521* @req - pointer to dwc3_request instance.
522*
523* @return int - 0 on success, negative on error.
524*/
525static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
526{
527 struct dwc3_trb *trb;
528 struct dwc3_trb *trb_link;
529 struct dwc3_gadget_ep_cmd_params params;
530 u32 cmd;
531 int ret = 0;
532
Mayank Rana83ad5822016-08-09 14:17:22 -0700533 /* We push the request to the dep->started_list list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700534 * this request is issued with start transfer. The request will be out
535 * from this list in 2 cases. The first is that the transfer will be
536 * completed (not if the transfer is endless using a circular TRBs with
537 * with link TRB). The second case is an option to do stop stransfer,
538 * this can be initiated by the function driver when calling dequeue.
539 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700540 req->started = true;
541 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700542
543 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana83ad5822016-08-09 14:17:22 -0700544 trb = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
545 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700546 memset(trb, 0, sizeof(*trb));
547
548 req->trb = trb;
549 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
550 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
551 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
552 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
553 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
554
555 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana83ad5822016-08-09 14:17:22 -0700556 trb_link = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
557 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700558 memset(trb_link, 0, sizeof(*trb_link));
559
560 trb_link->bpl = lower_32_bits(req->trb_dma);
561 trb_link->bph = DBM_TRB_BIT |
562 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
563 trb_link->size = 0;
564 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
565
566 /*
567 * Now start the transfer
568 */
569 memset(&params, 0, sizeof(params));
570 params.param0 = 0; /* TDAddr High */
571 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
572
573 /* DBM requires IOC to be set */
574 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700575 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700576 if (ret < 0) {
577 dev_dbg(dep->dwc->dev,
578 "%s: failed to send STARTTRANSFER command\n",
579 __func__);
580
581 list_del(&req->list);
582 return ret;
583 }
584 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700585 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700586
587 return ret;
588}
589
590/**
591* Queue a usb request to the DBM endpoint.
592* This function should be called after the endpoint
593* was enabled by the ep_enable.
594*
595* This function prepares special structure of TRBs which
596* is familiar with the DBM HW, so it will possible to use
597* this endpoint in DBM mode.
598*
599* The TRBs prepared by this function, is one normal TRB
600* which point to a fake buffer, followed by a link TRB
601* that points to the first TRB.
602*
603* The API of this function follow the regular API of
604* usb_ep_queue (see usb_ep_ops in include/linuk/usb/gadget.h).
605*
606* @usb_ep - pointer to usb_ep instance.
607* @request - pointer to usb_request instance.
608* @gfp_flags - possible flags.
609*
610* @return int - 0 on success, negative on error.
611*/
612static int dwc3_msm_ep_queue(struct usb_ep *ep,
613 struct usb_request *request, gfp_t gfp_flags)
614{
615 struct dwc3_request *req = to_dwc3_request(request);
616 struct dwc3_ep *dep = to_dwc3_ep(ep);
617 struct dwc3 *dwc = dep->dwc;
618 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
619 struct dwc3_msm_req_complete *req_complete;
620 unsigned long flags;
621 int ret = 0, size;
622 u8 bam_pipe;
623 bool producer;
624 bool disable_wb;
625 bool internal_mem;
626 bool ioc;
627 bool superspeed;
628
629 if (!(request->udc_priv & MSM_SPS_MODE)) {
630 /* Not SPS mode, call original queue */
631 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
632 __func__);
633
634 return (mdwc->original_ep_ops[dep->number])->queue(ep,
635 request,
636 gfp_flags);
637 }
638
639 /* HW restriction regarding TRB size (8KB) */
640 if (req->request.length < 0x2000) {
641 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
642 return -EINVAL;
643 }
644
645 /*
646 * Override req->complete function, but before doing that,
647 * store it's original pointer in the req_complete_list.
648 */
649 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
650 if (!req_complete)
651 return -ENOMEM;
652
653 req_complete->req = request;
654 req_complete->orig_complete = request->complete;
655 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
656 request->complete = dwc3_msm_req_complete_func;
657
658 /*
659 * Configure the DBM endpoint
660 */
661 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
662 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
663 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
664 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
665 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
666
667 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
668 disable_wb, internal_mem, ioc);
669 if (ret < 0) {
670 dev_err(mdwc->dev,
671 "error %d after calling dbm_ep_config\n", ret);
672 return ret;
673 }
674
675 dev_vdbg(dwc->dev, "%s: queing request %p to ep %s length %d\n",
676 __func__, request, ep->name, request->length);
677 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
678 dbm_event_buffer_config(mdwc->dbm,
679 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
680 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
681 DWC3_GEVNTSIZ_SIZE(size));
682
683 /*
684 * We must obtain the lock of the dwc3 core driver,
685 * including disabling interrupts, so we will be sure
686 * that we are the only ones that configure the HW device
687 * core and ensure that we queuing the request will finish
688 * as soon as possible so we will release back the lock.
689 */
690 spin_lock_irqsave(&dwc->lock, flags);
691 if (!dep->endpoint.desc) {
692 dev_err(mdwc->dev,
693 "%s: trying to queue request %p to disabled ep %s\n",
694 __func__, request, ep->name);
695 ret = -EPERM;
696 goto err;
697 }
698
699 if (dep->number == 0 || dep->number == 1) {
700 dev_err(mdwc->dev,
701 "%s: trying to queue dbm request %p to control ep %s\n",
702 __func__, request, ep->name);
703 ret = -EPERM;
704 goto err;
705 }
706
707
Mayank Rana83ad5822016-08-09 14:17:22 -0700708 if (dep->trb_dequeue != dep->trb_enqueue ||
709 !list_empty(&dep->pending_list)
710 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700711 dev_err(mdwc->dev,
712 "%s: trying to queue dbm request %p tp ep %s\n",
713 __func__, request, ep->name);
714 ret = -EPERM;
715 goto err;
716 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700717 dep->trb_dequeue = 0;
718 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700719 }
720
721 ret = __dwc3_msm_ep_queue(dep, req);
722 if (ret < 0) {
723 dev_err(mdwc->dev,
724 "error %d after calling __dwc3_msm_ep_queue\n", ret);
725 goto err;
726 }
727
728 spin_unlock_irqrestore(&dwc->lock, flags);
729 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
730 dbm_set_speed(mdwc->dbm, (u8)superspeed);
731
732 return 0;
733
734err:
735 spin_unlock_irqrestore(&dwc->lock, flags);
736 kfree(req_complete);
737 return ret;
738}
739
740/*
741* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
742*
743* @usb_ep - pointer to usb_ep instance.
744*
745* @return int - XferRscIndex
746*/
747static inline int gsi_get_xfer_index(struct usb_ep *ep)
748{
749 struct dwc3_ep *dep = to_dwc3_ep(ep);
750
751 return dep->resource_index;
752}
753
/*
 * Fills up the GSI channel information needed in call to IPA driver
 * for GSI channel creation.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @ch_info - output parameter with requested channel info
 */
static void gsi_get_channel_info(struct usb_ep *ep,
			struct gsi_channel_info *ch_info)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	int last_trb_index = 0;
	struct dwc3 *dwc = dep->dwc;
	struct usb_gsi_request *request = ch_info->ch_req;

	/* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
	ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
						DWC3_DEPCMD);
	ch_info->depcmd_hi_addr = 0;

	ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
						&dep->trb_pool[0]);
	/* Convert to multiple of 1KB */
	ch_info->const_buffer_size = request->buf_len/1024;

	/* IN direction */
	if (dep->direction) {
		/*
		 * Multiply by size of each TRB for xfer_ring_len in bytes.
		 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
		 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
		 */
		ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
		last_trb_index = 2 * request->num_bufs + 2;
	} else { /* OUT direction */
		/*
		 * Multiply by size of each TRB for xfer_ring_len in bytes.
		 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
		 * LINK TRB.
		 */
		ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
		last_trb_index = request->num_bufs + 1;
	}

	/* Store last 16 bits of LINK TRB address as per GSI hw requirement */
	ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
			&dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
	ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
			DWC3_GEVNTCOUNT(ep->ep_intr_num));
	ch_info->gevntcount_hi_addr = 0;

	dev_dbg(dwc->dev,
	"depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
		ch_info->depcmd_low_addr, ch_info->last_trb_addr,
		ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
}
810
811/*
812* Perform StartXfer on GSI EP. Stores XferRscIndex.
813*
814* @usb_ep - pointer to usb_ep instance.
815*
816* @return int - 0 on success
817*/
818static int gsi_startxfer_for_ep(struct usb_ep *ep)
819{
820 int ret;
821 struct dwc3_gadget_ep_cmd_params params;
822 u32 cmd;
823 struct dwc3_ep *dep = to_dwc3_ep(ep);
824 struct dwc3 *dwc = dep->dwc;
825
826 memset(&params, 0, sizeof(params));
827 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
828 params.param0 |= (ep->ep_intr_num << 16);
829 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
830 &dep->trb_pool[0]));
831 cmd = DWC3_DEPCMD_STARTTRANSFER;
832 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700833 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700834
835 if (ret < 0)
836 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700837 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700838 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
839 return ret;
840}
841
842/*
843* Store Ring Base and Doorbell Address for GSI EP
844* for GSI channel creation.
845*
846* @usb_ep - pointer to usb_ep instance.
847* @dbl_addr - Doorbell address obtained from IPA driver
848*/
849static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
850{
851 struct dwc3_ep *dep = to_dwc3_ep(ep);
852 struct dwc3 *dwc = dep->dwc;
853 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
854 int n = ep->ep_intr_num - 1;
855
856 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
857 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
858 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
859
860 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
861 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
862 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
863 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
864}
865
866/*
867* Rings Doorbell for IN GSI Channel
868*
869* @usb_ep - pointer to usb_ep instance.
870* @request - pointer to GSI request. This is used to pass in the
871* address of the GSI doorbell obtained from IPA driver
872*/
873static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
874{
875 void __iomem *gsi_dbl_address_lsb;
876 void __iomem *gsi_dbl_address_msb;
877 dma_addr_t offset;
878 u64 dbl_addr = *((u64 *)request->buf_base_addr);
879 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
880 u32 dbl_hi_addr = (dbl_addr >> 32);
881 u32 num_trbs = (request->num_bufs * 2 + 2);
882 struct dwc3_ep *dep = to_dwc3_ep(ep);
883 struct dwc3 *dwc = dep->dwc;
884 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
885
886 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
887 dbl_lo_addr, sizeof(u32));
888 if (!gsi_dbl_address_lsb)
889 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
890
891 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
892 dbl_hi_addr, sizeof(u32));
893 if (!gsi_dbl_address_msb)
894 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
895
896 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
897 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
898 &offset, gsi_dbl_address_lsb, dbl_lo_addr);
899
900 writel_relaxed(offset, gsi_dbl_address_lsb);
901 writel_relaxed(0, gsi_dbl_address_msb);
902}
903
904/*
905* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
906*
907* @usb_ep - pointer to usb_ep instance.
908* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
909*
910* @return int - 0 on success
911*/
912static int gsi_updatexfer_for_ep(struct usb_ep *ep,
913 struct usb_gsi_request *request)
914{
915 int i;
916 int ret;
917 u32 cmd;
918 int num_trbs = request->num_bufs + 1;
919 struct dwc3_trb *trb;
920 struct dwc3_gadget_ep_cmd_params params;
921 struct dwc3_ep *dep = to_dwc3_ep(ep);
922 struct dwc3 *dwc = dep->dwc;
923
924 for (i = 0; i < num_trbs - 1; i++) {
925 trb = &dep->trb_pool[i];
926 trb->ctrl |= DWC3_TRB_CTRL_HWO;
927 }
928
929 memset(&params, 0, sizeof(params));
930 cmd = DWC3_DEPCMD_UPDATETRANSFER;
931 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -0700932 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700933 dep->flags |= DWC3_EP_BUSY;
934 if (ret < 0)
935 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
936 return ret;
937}
938
939/*
940* Perform EndXfer on particular GSI EP.
941*
942* @usb_ep - pointer to usb_ep instance.
943*/
944static void gsi_endxfer_for_ep(struct usb_ep *ep)
945{
946 struct dwc3_ep *dep = to_dwc3_ep(ep);
947 struct dwc3 *dwc = dep->dwc;
948
949 dwc3_stop_active_transfer(dwc, dep->number, true);
950}
951
952/*
953* Allocates and configures TRBs for GSI EPs.
954*
955* @usb_ep - pointer to usb_ep instance.
956* @request - pointer to GSI request.
957*
958* @return int - 0 on success
959*/
960static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
961{
962 int i = 0;
963 dma_addr_t buffer_addr = req->dma;
964 struct dwc3_ep *dep = to_dwc3_ep(ep);
965 struct dwc3 *dwc = dep->dwc;
966 struct dwc3_trb *trb;
967 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
968 : (req->num_bufs + 1);
969
970 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
971 num_trbs * sizeof(struct dwc3_trb),
972 num_trbs * sizeof(struct dwc3_trb), 0);
973 if (!dep->trb_dma_pool) {
974 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
975 dep->name);
976 return -ENOMEM;
977 }
978
979 dep->num_trbs = num_trbs;
980
981 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
982 GFP_KERNEL, &dep->trb_pool_dma);
983 if (!dep->trb_pool) {
984 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
985 dep->name);
986 return -ENOMEM;
987 }
988
989 /* IN direction */
990 if (dep->direction) {
991 for (i = 0; i < num_trbs ; i++) {
992 trb = &dep->trb_pool[i];
993 memset(trb, 0, sizeof(*trb));
994 /* Set up first n+1 TRBs for ZLPs */
995 if (i < (req->num_bufs + 1)) {
996 trb->bpl = 0;
997 trb->bph = 0;
998 trb->size = 0;
999 trb->ctrl = DWC3_TRBCTL_NORMAL
1000 | DWC3_TRB_CTRL_IOC;
1001 continue;
1002 }
1003
1004 /* Setup n TRBs pointing to valid buffers */
1005 trb->bpl = lower_32_bits(buffer_addr);
1006 trb->bph = 0;
1007 trb->size = 0;
1008 trb->ctrl = DWC3_TRBCTL_NORMAL
1009 | DWC3_TRB_CTRL_IOC;
1010 buffer_addr += req->buf_len;
1011
1012 /* Set up the Link TRB at the end */
1013 if (i == (num_trbs - 1)) {
1014 trb->bpl = dwc3_trb_dma_offset(dep,
1015 &dep->trb_pool[0]);
1016 trb->bph = (1 << 23) | (1 << 21)
1017 | (ep->ep_intr_num << 16);
1018 trb->size = 0;
1019 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1020 | DWC3_TRB_CTRL_HWO;
1021 }
1022 }
1023 } else { /* OUT direction */
1024
1025 for (i = 0; i < num_trbs ; i++) {
1026
1027 trb = &dep->trb_pool[i];
1028 memset(trb, 0, sizeof(*trb));
1029 trb->bpl = lower_32_bits(buffer_addr);
1030 trb->bph = 0;
1031 trb->size = req->buf_len;
1032 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
1033 | DWC3_TRB_CTRL_CSP
1034 | DWC3_TRB_CTRL_ISP_IMI;
1035 buffer_addr += req->buf_len;
1036
1037 /* Set up the Link TRB at the end */
1038 if (i == (num_trbs - 1)) {
1039 trb->bpl = dwc3_trb_dma_offset(dep,
1040 &dep->trb_pool[0]);
1041 trb->bph = (1 << 23) | (1 << 21)
1042 | (ep->ep_intr_num << 16);
1043 trb->size = 0;
1044 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1045 | DWC3_TRB_CTRL_HWO;
1046 }
1047 }
1048 }
1049 return 0;
1050}
1051
1052/*
1053* Frees TRBs for GSI EPs.
1054*
1055* @usb_ep - pointer to usb_ep instance.
1056*
1057*/
1058static void gsi_free_trbs(struct usb_ep *ep)
1059{
1060 struct dwc3_ep *dep = to_dwc3_ep(ep);
1061
1062 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1063 return;
1064
1065 /* Free TRBs and TRB pool for EP */
1066 if (dep->trb_dma_pool) {
1067 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1068 dep->trb_pool_dma);
1069 dma_pool_destroy(dep->trb_dma_pool);
1070 dep->trb_pool = NULL;
1071 dep->trb_pool_dma = 0;
1072 dep->trb_dma_pool = NULL;
1073 }
1074}
/*
 * Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
 *
 * Builds a SETEPCONFIG command from the endpoint descriptors, routes the
 * endpoint's events to the GSI interrupter, and on first use also assigns
 * a transfer resource and enables the endpoint in DALEPENA.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to GSI request.
 */
static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_gadget_ep_cmd_params params;
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	u32 reg;

	memset(&params, 0x00, sizeof(params));

	/* Configure GSI EP */
	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	/* Streams apply only to SS bulk endpoints with a non-zero max */
	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
					| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	/* Set EP number */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/* Set interrupter number for GSI endpoints */
	params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);

	/* Enable XferInProgress and XferComplete Interrupts */
	params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
	params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
	params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	/* Remove FIFO Number for GSI EP*/
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	params.param0 |= DWC3_DEPCFG_ACTION_INIT;

	dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
	params.param0, params.param1, params.param2, dep->name);

	dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);

	/* Set XferRsc Index for GSI EP */
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		/* One-time init: one transfer resource, then enable the EP */
		memset(&params, 0x00, sizeof(params));
		params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
		dwc3_send_gadget_ep_cmd(dep,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
	}

}
1152
/*
 * Enables USB wrapper for GSI
 *
 * Sequence: enable the GSI clock, pulse the restart-doorbell-pointer bit
 * (write 1 then 0), then set the global GSI enable bit last.
 *
 * @usb_ep - pointer to usb_ep instance.
 */
static void gsi_enable(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
	/* pulse the doorbell-pointer restart bit */
	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
	dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
	dwc3_msm_write_reg_field(mdwc->base,
		GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
}
1174
1175/*
1176* Block or allow doorbell towards GSI
1177*
1178* @usb_ep - pointer to usb_ep instance.
1179* @request - pointer to GSI request. In this case num_bufs is used as a bool
1180* to set or clear the doorbell bit
1181*/
1182static void gsi_set_clear_dbell(struct usb_ep *ep,
1183 bool block_db)
1184{
1185
1186 struct dwc3_ep *dep = to_dwc3_ep(ep);
1187 struct dwc3 *dwc = dep->dwc;
1188 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1189
1190 dwc3_msm_write_reg_field(mdwc->base,
1191 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1192}
1193
/*
 * Performs necessary checks before stopping GSI channels
 *
 * Busy-waits (bounded by the iteration counter) for the GSI write-control
 * state machine to go idle, and — unless handling USB function suspend —
 * verifies the link is in U3.
 *
 * @usb_ep - pointer to usb_ep instance to access DWC3 regs
 * @f_suspend - true when called for function suspend; skips the U3 check.
 *
 * @return bool - true when it is safe to suspend the GSI channels.
 */
static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
{
	u32 timeout = 1500;
	u32 reg = 0;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/* poll until WR_CTRL_STATE reads 0 or the retry budget runs out */
	while (dwc3_msm_read_reg_field(mdwc->base,
		GSI_IF_STS,	GSI_WR_CTRL_STATE_MASK)) {
		if (!timeout--) {
			dev_err(mdwc->dev,
			"Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
			return false;
		}
	}
	/* Check for U3 only if we are not handling Function Suspend */
	if (!f_suspend) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
			dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
			return false;
		}
	}

	return true;
}
1226
1227
1228/**
1229* Performs GSI operations or GSI EP related operations.
1230*
1231* @usb_ep - pointer to usb_ep instance.
1232* @op_data - pointer to opcode related data.
1233* @op - GSI related or GSI EP related op code.
1234*
1235* @return int - 0 on success, negative on error.
1236* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1237*/
1238static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1239 void *op_data, enum gsi_ep_op op)
1240{
1241 u32 ret = 0;
1242 struct dwc3_ep *dep = to_dwc3_ep(ep);
1243 struct dwc3 *dwc = dep->dwc;
1244 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1245 struct usb_gsi_request *request;
1246 struct gsi_channel_info *ch_info;
1247 bool block_db, f_suspend;
1248
1249 switch (op) {
1250 case GSI_EP_OP_PREPARE_TRBS:
1251 request = (struct usb_gsi_request *)op_data;
1252 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1253 ret = gsi_prepare_trbs(ep, request);
1254 break;
1255 case GSI_EP_OP_FREE_TRBS:
1256 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1257 gsi_free_trbs(ep);
1258 break;
1259 case GSI_EP_OP_CONFIG:
1260 request = (struct usb_gsi_request *)op_data;
1261 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
1262 gsi_configure_ep(ep, request);
1263 break;
1264 case GSI_EP_OP_STARTXFER:
1265 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
1266 ret = gsi_startxfer_for_ep(ep);
1267 break;
1268 case GSI_EP_OP_GET_XFER_IDX:
1269 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1270 ret = gsi_get_xfer_index(ep);
1271 break;
1272 case GSI_EP_OP_STORE_DBL_INFO:
1273 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1274 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1275 break;
1276 case GSI_EP_OP_ENABLE_GSI:
1277 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1278 gsi_enable(ep);
1279 break;
1280 case GSI_EP_OP_GET_CH_INFO:
1281 ch_info = (struct gsi_channel_info *)op_data;
1282 gsi_get_channel_info(ep, ch_info);
1283 break;
1284 case GSI_EP_OP_RING_IN_DB:
1285 request = (struct usb_gsi_request *)op_data;
1286 dev_dbg(mdwc->dev, "RING IN EP DB\n");
1287 gsi_ring_in_db(ep, request);
1288 break;
1289 case GSI_EP_OP_UPDATEXFER:
1290 request = (struct usb_gsi_request *)op_data;
1291 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
1292 ret = gsi_updatexfer_for_ep(ep, request);
1293 break;
1294 case GSI_EP_OP_ENDXFER:
1295 request = (struct usb_gsi_request *)op_data;
1296 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
1297 gsi_endxfer_for_ep(ep);
1298 break;
1299 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1300 block_db = *((bool *)op_data);
1301 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1302 block_db);
1303 gsi_set_clear_dbell(ep, block_db);
1304 break;
1305 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1306 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1307 f_suspend = *((bool *)op_data);
1308 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1309 break;
1310 case GSI_EP_OP_DISABLE:
1311 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1312 ret = ep->ops->disable(ep);
1313 break;
1314 default:
1315 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1316 }
1317
1318 return ret;
1319}
1320
1321/**
1322 * Configure MSM endpoint.
1323 * This function do specific configurations
1324 * to an endpoint which need specific implementaion
1325 * in the MSM architecture.
1326 *
1327 * This function should be called by usb function/class
1328 * layer which need a support from the specific MSM HW
1329 * which wrap the USB3 core. (like GSI or DBM specific endpoints)
1330 *
1331 * @ep - a pointer to some usb_ep instance
1332 *
1333 * @return int - 0 on success, negetive on error.
1334 */
1335int msm_ep_config(struct usb_ep *ep)
1336{
1337 struct dwc3_ep *dep = to_dwc3_ep(ep);
1338 struct dwc3 *dwc = dep->dwc;
1339 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1340 struct usb_ep_ops *new_ep_ops;
1341
1342
1343 /* Save original ep ops for future restore*/
1344 if (mdwc->original_ep_ops[dep->number]) {
1345 dev_err(mdwc->dev,
1346 "ep [%s,%d] already configured as msm endpoint\n",
1347 ep->name, dep->number);
1348 return -EPERM;
1349 }
1350 mdwc->original_ep_ops[dep->number] = ep->ops;
1351
1352 /* Set new usb ops as we like */
1353 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1354 if (!new_ep_ops)
1355 return -ENOMEM;
1356
1357 (*new_ep_ops) = (*ep->ops);
1358 new_ep_ops->queue = dwc3_msm_ep_queue;
1359 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1360 ep->ops = new_ep_ops;
1361
1362 /*
1363 * Do HERE more usb endpoint configurations
1364 * which are specific to MSM.
1365 */
1366
1367 return 0;
1368}
1369EXPORT_SYMBOL(msm_ep_config);
1370
1371/**
1372 * Un-configure MSM endpoint.
1373 * Tear down configurations done in the
1374 * dwc3_msm_ep_config function.
1375 *
1376 * @ep - a pointer to some usb_ep instance
1377 *
1378 * @return int - 0 on success, negative on error.
1379 */
1380int msm_ep_unconfig(struct usb_ep *ep)
1381{
1382 struct dwc3_ep *dep = to_dwc3_ep(ep);
1383 struct dwc3 *dwc = dep->dwc;
1384 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1385 struct usb_ep_ops *old_ep_ops;
1386
1387 /* Restore original ep ops */
1388 if (!mdwc->original_ep_ops[dep->number]) {
1389 dev_err(mdwc->dev,
1390 "ep [%s,%d] was not configured as msm endpoint\n",
1391 ep->name, dep->number);
1392 return -EINVAL;
1393 }
1394 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1395 ep->ops = mdwc->original_ep_ops[dep->number];
1396 mdwc->original_ep_ops[dep->number] = NULL;
1397 kfree(old_ep_ops);
1398
1399 /*
1400 * Do HERE more usb endpoint un-configurations
1401 * which are specific to MSM.
1402 */
1403
1404 return 0;
1405}
1406EXPORT_SYMBOL(msm_ep_unconfig);
1407#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1408
1409static void dwc3_resume_work(struct work_struct *w);
1410
/*
 * Worker that restarts an active USB session after a controller error.
 *
 * Simulates a disconnect via dwc3_resume_work(), waits (up to ~1s) for the
 * core to runtime-suspend so the disconnect is fully processed, then
 * triggers a reconnect — but only if VBUS is still present.
 */
static void dwc3_restart_usb_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
						restart_usb_work);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	unsigned int timeout = 50;

	dev_dbg(mdwc->dev, "%s\n", __func__);

	/* nothing to restart if already in LPM or not dual-role */
	if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
		dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
		return;
	}

	/* guard against concurrent VBUS handling */
	mdwc->in_restart = true;

	if (!mdwc->vbus_active) {
		dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
		dwc->err_evt_seen = false;
		mdwc->in_restart = false;
		return;
	}

	/* Reset active USB connection */
	dwc3_resume_work(&mdwc->resume_work);

	/* Make sure disconnect is processed before sending connect */
	while (--timeout && !pm_runtime_suspended(mdwc->dev))
		msleep(20);

	if (!timeout) {
		dev_dbg(mdwc->dev,
			"Not in LPM after disconnect, forcing suspend...\n");
		pm_runtime_suspend(mdwc->dev);
	}

	/* Force reconnect only if cable is still connected */
	if (mdwc->vbus_active) {
		mdwc->in_restart = false;
		dwc3_resume_work(&mdwc->resume_work);
	}

	dwc->err_evt_seen = false;
	flush_delayed_work(&mdwc->sm_work);
}
1457
1458/*
1459 * Check whether the DWC3 requires resetting the ep
1460 * after going to Low Power Mode (lpm)
1461 */
1462bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1463{
1464 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1465 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1466
1467 return dbm_reset_ep_after_lpm(mdwc->dbm);
1468}
1469EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1470
1471/*
1472 * Config Global Distributed Switch Controller (GDSC)
1473 * to support controller power collapse
1474 */
1475static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1476{
1477 int ret;
1478
1479 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1480 return -EPERM;
1481
1482 if (on) {
1483 ret = regulator_enable(mdwc->dwc3_gdsc);
1484 if (ret) {
1485 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1486 return ret;
1487 }
1488 } else {
1489 ret = regulator_disable(mdwc->dwc3_gdsc);
1490 if (ret) {
1491 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1492 return ret;
1493 }
1494 }
1495
1496 return ret;
1497}
1498
/*
 * Assert or deassert the asynchronous block reset of the DWC3 core.
 *
 * Assert path gates all core clocks first (with the power-event IRQ
 * disabled so no handler runs on a clock-gated block); deassert path
 * releases reset, waits briefly, then re-enables clocks in reverse order.
 *
 * @return int - 0 on success, reset_control error code otherwise.
 */
static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
{
	int ret = 0;

	if (assert) {
		disable_irq(mdwc->pwr_event_irq);
		/* Using asynchronous block reset to the hardware */
		dev_dbg(mdwc->dev, "block_reset ASSERT\n");
		clk_disable_unprepare(mdwc->utmi_clk);
		clk_disable_unprepare(mdwc->sleep_clk);
		clk_disable_unprepare(mdwc->core_clk);
		clk_disable_unprepare(mdwc->iface_clk);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
	} else {
		dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
		/* short settle time after releasing reset */
		ndelay(200);
		clk_prepare_enable(mdwc->iface_clk);
		clk_prepare_enable(mdwc->core_clk);
		clk_prepare_enable(mdwc->sleep_clk);
		clk_prepare_enable(mdwc->utmi_clk);
		enable_irq(mdwc->pwr_event_irq);
	}

	return ret;
}
1529
/*
 * Program the reference-clock period (GUCTL) and frame-length adjustment
 * (GFLADJ) registers for the configured UTMI clock rate.
 *
 * Values for the 19.2 MHz and 24 MHz cases come from the Synopsys
 * databook (Table 6-55); GFLADJ exists only on core revision >= 2.50a.
 */
static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
{
	u32 guctl, gfladj = 0;

	guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
	guctl &= ~DWC3_GUCTL_REFCLKPER;

	/* GFLADJ register is used starting with revision 2.50a */
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
		gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
		gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
	}

	/* Refer to SNPS Databook Table 6-55 for calculations used */
	switch (mdwc->utmi_clk_rate) {
	case 19200000:
		guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	case 24000000:
		guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	default:
		dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
				mdwc->utmi_clk_rate);
		break;
	}

	dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
	/* gfladj stays 0 on cores older than 2.50a — nothing to write */
	if (gfladj)
		dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
}
1571
/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
{
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
		/* On older cores set XHCI_REV bit to specify revision 1.0 */
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
					BIT(2), 1);

	/*
	 * Enable master clock for RAMs to allow BAM to access RAMs when
	 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
	 * are seen where RAM clocks get turned OFF in SS mode
	 */
	/* 0x18 sets two RAM master-clock enable bits in CGCTL — magic per
	 * the QSCRATCH wrapper; TODO confirm against the HW programming guide
	 */
	dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
		dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);

}
1589
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001590static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1591{
1592 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1593 vbus_draw_work);
1594 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1595
1596 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1597}
1598
/*
 * Callback from the DWC3 core notifying the MSM glue layer of controller
 * events (errors, resets, connect-done, OTG/bus-suspend, current draw,
 * session restart). Ignored on cores older than 2.30a.
 */
static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	u32 reg;

	if (dwc->revision < DWC3_REVISION_230A)
		return;

	switch (event) {
	case DWC3_CONTROLLER_ERROR_EVENT:
		dev_info(mdwc->dev,
			"DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
			dwc->irq_cnt);

		dwc3_gadget_disable_irq(dwc);

		/* prevent core from generating interrupts until recovery */
		reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
		reg |= DWC3_GCTL_CORESOFTRESET;
		dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);

		/* restart USB which performs full reset and reconnect */
		schedule_work(&mdwc->restart_usb_work);
		break;
	case DWC3_CONTROLLER_RESET_EVENT:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
		/* HS & SSPHYs get reset as part of core soft reset */
		dwc3_msm_qscratch_reg_init(mdwc);
		break;
	case DWC3_CONTROLLER_POST_RESET_EVENT:
		dev_dbg(mdwc->dev,
				"DWC3_CONTROLLER_POST_RESET_EVENT received\n");

		/*
		 * Below sequence is used when controller is working without
		 * having ssphy and only USB high speed is supported.
		 */
		if (dwc->maximum_speed == USB_SPEED_HIGH) {
			/* gate the PIPE/UTMI clock before switching its mux */
			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
				dwc3_msm_read_reg(mdwc->base,
				QSCRATCH_GENERAL_CFG)
				| PIPE_UTMI_CLK_DIS);

			usleep_range(2, 5);


			/* select UTMI clock and software PHY status */
			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
				dwc3_msm_read_reg(mdwc->base,
				QSCRATCH_GENERAL_CFG)
				| PIPE_UTMI_CLK_SEL
				| PIPE3_PHYSTATUS_SW);

			usleep_range(2, 5);

			/* ungate the clock with the new selection in place */
			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
				dwc3_msm_read_reg(mdwc->base,
				QSCRATCH_GENERAL_CFG)
				& ~PIPE_UTMI_CLK_DIS);
		}

		dwc3_msm_update_ref_clk(mdwc);
		dwc->tx_fifo_size = mdwc->tx_fifo_size;
		break;
	case DWC3_CONTROLLER_CONNDONE_EVENT:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
		/*
		 * Add power event if the dbm indicates coming out of L1 by
		 * interrupt
		 */
		if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
			dwc3_msm_write_reg_field(mdwc->base,
					PWR_EVNT_IRQ_MASK_REG,
					PWR_EVNT_LPM_OUT_L1_MASK, 1);

		atomic_set(&dwc->in_lpm, 0);
		break;
	case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
		if (dwc->enable_bus_suspend) {
			mdwc->suspend = dwc->b_suspend;
			queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
		}
		break;
	case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
		/* defer to worker — may be called from atomic context */
		schedule_work(&mdwc->vbus_draw_work);
		break;
	case DWC3_CONTROLLER_RESTART_USB_SESSION:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
		dwc3_restart_usb_work(&mdwc->restart_usb_work);
		break;
	default:
		dev_dbg(mdwc->dev, "unknown dwc3 event\n");
		break;
	}
}
1695
/*
 * Perform a block reset of the MSM wrapper: optionally a full core
 * (link-clock) reset, followed by a DBM soft reset and re-enable.
 */
static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
{
	int ret = 0;

	if (core_reset) {
		/* assert, hold ~1ms, deassert, then let HW settle ~10ms */
		ret = dwc3_msm_link_clk_reset(mdwc, 1);
		if (ret)
			return;

		usleep_range(1000, 1200);
		ret = dwc3_msm_link_clk_reset(mdwc, 0);
		if (ret)
			return;

		usleep_range(10000, 12000);
	}

	if (mdwc->dbm) {
		/* Reset the DBM */
		dbm_soft_reset(mdwc->dbm, 1);
		usleep_range(1000, 1200);
		dbm_soft_reset(mdwc->dbm, 0);

		/*enable DBM*/
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
			DBM_EN_MASK, 0x1);
		dbm_enable(mdwc->dbm);
	}
}
1725
/*
 * Re-initialize the controller after a power collapse (power-on-reset):
 * reconfigure the AHB2PHY bridge wait states, then re-run DWC3 core init
 * and restore the event buffers.
 */
static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	u32 val;

	/* Configure AHB2PHY for one wait state read/write */
	if (mdwc->ahb2phy_base) {
		clk_prepare_enable(mdwc->cfg_ahb_clk);
		val = readl_relaxed(mdwc->ahb2phy_base +
				PERIPH_SS_AHB2PHY_TOP_CFG);
		if (val != ONE_READ_WRITE_WAIT) {
			writel_relaxed(ONE_READ_WRITE_WAIT,
				mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
			/* complete above write before configuring USB PHY. */
			mb();
		}
		clk_disable_unprepare(mdwc->cfg_ahb_clk);
	}

	/* one-time core pre-init on the very first power-up */
	if (!mdwc->init) {
		dwc3_core_pre_init(dwc);
		mdwc->init = true;
	}

	dwc3_core_init(dwc);
	/* Re-configure event buffers */
	dwc3_event_buffers_setup(dwc);
}
1754
/*
 * Prepare the controller for low power mode: verify P3 state when a
 * SuperSpeed session is active, then drive the HS PHY into L2 and wait
 * (up to 5 ms) for the L2 power event to latch.
 *
 * @return int - 0 on success, -EBUSY when not in P3 and LPM must abort.
 */
static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
{
	unsigned long timeout;
	u32 reg = 0;

	if ((mdwc->in_host_mode || mdwc->vbus_active)
			&& dwc3_msm_is_superspeed(mdwc)) {
		if (!atomic_read(&mdwc->in_p3)) {
			dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
			return -EBUSY;
		}
	}

	/* Clear previous L2 events */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);

	/* Prepare HSPHY for suspend */
	reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);

	/* Wait for PHY to go into L2 */
	timeout = jiffies + msecs_to_jiffies(5);
	while (!time_after(jiffies, timeout)) {
		reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
		if (reg & PWR_EVNT_LPM_IN_L2_MASK)
			break;
	}
	/* non-fatal: log and continue the suspend sequence regardless */
	if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
		dev_err(mdwc->dev, "could not transition HS PHY to L2\n");

	/* Clear L2 event bit */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK);

	return 0;
}
1793
1794static void dwc3_msm_bus_vote_w(struct work_struct *w)
1795{
1796 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
1797 int ret;
1798
1799 ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
1800 mdwc->bus_vote);
1801 if (ret)
1802 dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
1803}
1804
/*
 * Record the negotiated link speed in the HS PHY flags (PHY_HSFS_MODE or
 * PHY_LS_MODE) before suspend. Host mode scans every enabled root-hub
 * port; device mode reads the gadget's speed directly.
 */
static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	int i, num_ports;
	u32 reg;

	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
	if (mdwc->in_host_mode) {
		reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
		num_ports = HCS_MAX_PORTS(reg);
		for (i = 0; i < num_ports; i++) {
			/* PORTSC registers are 0x10 apart per port */
			reg = dwc3_msm_read_reg(mdwc->base,
					USB3_PORTSC + i*0x10);
			if (reg & PORT_PE) {
				if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
					mdwc->hs_phy->flags |= PHY_HSFS_MODE;
				else if (DEV_LOWSPEED(reg))
					mdwc->hs_phy->flags |= PHY_LS_MODE;
			}
		}
	} else {
		if (dwc->gadget.speed == USB_SPEED_HIGH ||
			dwc->gadget.speed == USB_SPEED_FULL)
			mdwc->hs_phy->flags |= PHY_HSFS_MODE;
		else if (dwc->gadget.speed == USB_SPEED_LOW)
			mdwc->hs_phy->flags |= PHY_LS_MODE;
	}
}
1833
1834
/*
 * Put the controller into low power mode: abort if events are pending or
 * state machines are mid-transition, quiesce the PHYs, gate all clocks,
 * optionally power-collapse the core, drop the bus vote, and arm the PHY
 * wakeup interrupts. Order of operations is significant throughout.
 *
 * @return int - 0 on success, -EBUSY when LPM entry must be retried later.
 */
static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
{
	int ret;
	bool can_suspend_ssphy;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct dwc3_event_buffer *evt;

	if (atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
		return 0;
	}

	/* abort if device-mode events are still queued for processing */
	if (!mdwc->in_host_mode) {
		evt = dwc->ev_buf;
		if ((evt->flags & DWC3_EVENT_PENDING)) {
			dev_dbg(mdwc->dev,
				"%s: %d device events pending, abort suspend\n",
				__func__, evt->count / 4);
			return -EBUSY;
		}
	}

	if (!mdwc->vbus_active && dwc->is_drd &&
		mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
		/*
		 * In some cases, the pm_runtime_suspend may be called by
		 * usb_bam when there is pending lpm flag. However, if this is
		 * done when cable was disconnected and otg state has not
		 * yet changed to IDLE, then it means OTG state machine
		 * is running and we race against it. So cancel LPM for now,
		 * and OTG state machine will go for LPM later, after completing
		 * transition to IDLE state.
		 */
		dev_dbg(mdwc->dev,
			"%s: cable disconnected while not in idle otg state\n",
			__func__);
		return -EBUSY;
	}

	/*
	 * Check if device is not in CONFIGURED state
	 * then check controller state of L2 and break
	 * LPM sequence. Check this for device bus suspend case.
	 */
	if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
		(dwc->gadget.state != USB_STATE_CONFIGURED)) {
		pr_err("%s(): Trying to go in LPM with state:%d\n",
					__func__, dwc->gadget.state);
		pr_err("%s(): LPM is not performed.\n", __func__);
		return -EBUSY;
	}

	ret = dwc3_msm_prepare_suspend(mdwc);
	if (ret)
		return ret;

	/* Initialize variables here */
	can_suspend_ssphy = !(mdwc->in_host_mode &&
				dwc3_msm_is_host_superspeed(mdwc));

	/* Disable core irq */
	if (dwc->irq)
		disable_irq(dwc->irq);

	/* disable power event irq, hs and ss phy irq is used as wake up src */
	disable_irq(mdwc->pwr_event_irq);

	dwc3_set_phy_speed_flags(mdwc);
	/* Suspend HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 1);

	/* Suspend SS PHY */
	if (can_suspend_ssphy) {
		/* indicate phy about SS mode */
		if (dwc3_msm_is_superspeed(mdwc))
			mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
		usb_phy_set_suspend(mdwc->ss_phy, 1);
		mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
	}

	/* make sure above writes are completed before turning off clocks */
	wmb();

	/* Disable clocks */
	if (mdwc->bus_aggr_clk)
		clk_disable_unprepare(mdwc->bus_aggr_clk);
	clk_disable_unprepare(mdwc->utmi_clk);

	/* Memory core: OFF, Memory periphery: OFF */
	if (!mdwc->in_host_mode && !mdwc->vbus_active) {
		clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
		clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
	}

	/* drop core clock to XO rate before gating it */
	clk_set_rate(mdwc->core_clk, 19200000);
	clk_disable_unprepare(mdwc->core_clk);
	/*
	 * Disable iface_clk only after core_clk as core_clk has FSM
	 * depedency on iface_clk. Hence iface_clk should be turned off
	 * after core_clk is turned off.
	 */
	clk_disable_unprepare(mdwc->iface_clk);
	/* USB PHY no more requires TCXO */
	clk_disable_unprepare(mdwc->xo_clk);

	/* Perform controller power collapse */
	if (!mdwc->in_host_mode && !mdwc->vbus_active) {
		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 0);
		clk_disable_unprepare(mdwc->sleep_clk);
	}

	/* Remove bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 0;
		schedule_work(&mdwc->bus_vote_w);
	}

	/*
	 * release wakeup source with timeout to defer system suspend to
	 * handle case where on USB cable disconnect, SUSPEND and DISCONNECT
	 * event is received.
	 */
	if (mdwc->lpm_to_suspend_delay) {
		dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
					mdwc->lpm_to_suspend_delay);
		pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
	} else {
		pm_relax(mdwc->dev);
	}

	atomic_set(&dwc->in_lpm, 1);

	/*
	 * with DCP or during cable disconnect, we dont require wakeup
	 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
	 * case of host bus suspend and device bus suspend.
	 */
	if (mdwc->vbus_active || mdwc->in_host_mode) {
		enable_irq_wake(mdwc->hs_phy_irq);
		enable_irq(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			enable_irq_wake(mdwc->ss_phy_irq);
			enable_irq(mdwc->ss_phy_irq);
		}
		/*
		 * Enable power event irq during bus suspend in host mode for
		 * mapping MPM pin for DP so that wakeup can happen in system
		 * suspend.
		 */
		if (mdwc->in_host_mode) {
			enable_irq(mdwc->pwr_event_irq);
			enable_irq_wake(mdwc->pwr_event_irq);
		}
		mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 in low power mode\n");
	return 0;
}
1996
/*
 * dwc3_msm_resume - bring the controller out of low power mode
 * @mdwc: driver context
 *
 * Reverses dwc3_msm_suspend(): re-votes the bus/TCXO, restores the GDSC
 * and clocks (iface before core — see comment below), wakes both PHYs,
 * re-runs power-on reset if the controller was power collapsed, and
 * re-arms the power event IRQ.  Ordering here mirrors hardware
 * requirements; do not reorder the clock/reset/PHY steps.
 *
 * Return: 0 (all failures are logged but not propagated).
 */
static int dwc3_msm_resume(struct dwc3_msm *mdwc)
{
	int ret;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);

	/* Idempotent: bail out if we never entered LPM */
	if (!atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
		return 0;
	}

	pm_stay_awake(mdwc->dev);

	/* Enable bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 1;
		schedule_work(&mdwc->bus_vote_w);
	}

	/* Vote for TCXO while waking up USB HSPHY */
	ret = clk_prepare_enable(mdwc->xo_clk);
	if (ret)
		dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
					__func__, ret);

	/* Restore controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 1);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset assert failed\n",
					__func__);
		/* HW requires a short delay for reset to take place properly */
		usleep_range(1000, 1200);
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
					__func__);
		clk_prepare_enable(mdwc->sleep_clk);
	}

	/*
	 * Enable clocks
	 * Turned ON iface_clk before core_clk due to FSM depedency.
	 */
	clk_prepare_enable(mdwc->iface_clk);
	clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
	clk_prepare_enable(mdwc->core_clk);
	clk_prepare_enable(mdwc->utmi_clk);
	if (mdwc->bus_aggr_clk)
		clk_prepare_enable(mdwc->bus_aggr_clk);

	/* Resume SS PHY; select the SS lane from the Type-C orientation */
	if (mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
		mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
		if (mdwc->typec_orientation == ORIENTATION_CC1)
			mdwc->ss_phy->flags |= PHY_LANE_A;
		if (mdwc->typec_orientation == ORIENTATION_CC2)
			mdwc->ss_phy->flags |= PHY_LANE_B;
		usb_phy_set_suspend(mdwc->ss_phy, 0);
		mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
		mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
	}

	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
	/* Resume HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 0);

	/* Recover from controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		u32 tmp;

		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);

		dwc3_msm_power_collapse_por(mdwc);

		/* Get initial P3 status and enable IN_P3 event */
		tmp = dwc3_msm_read_reg_field(mdwc->base,
			DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
		atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
		dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
					PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);

		mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
	}

	atomic_set(&dwc->in_lpm, 0);

	/* Disable HSPHY auto suspend */
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
				~(DWC3_GUSB2PHYCFG_ENBLSLPM |
					DWC3_GUSB2PHYCFG_SUSPHY));

	/* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
	if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
		disable_irq_wake(mdwc->hs_phy_irq);
		disable_irq_nosync(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			disable_irq_wake(mdwc->ss_phy_irq);
			disable_irq_nosync(mdwc->ss_phy_irq);
		}
		if (mdwc->in_host_mode) {
			disable_irq_wake(mdwc->pwr_event_irq);
			disable_irq(mdwc->pwr_event_irq);
		}
		mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 exited from low power mode\n");

	/* enable power evt irq for IN P3 detection */
	enable_irq(mdwc->pwr_event_irq);

	/* Enable core irq */
	if (dwc->irq)
		enable_irq(dwc->irq);

	/*
	 * Handle other power events that could not have been handled during
	 * Low Power Mode
	 */
	dwc3_pwr_event_handler(mdwc);

	return 0;
}
2125
2126/**
2127 * dwc3_ext_event_notify - callback to handle events from external transceiver
2128 *
2129 * Returns 0 on success
2130 */
2131static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2132{
2133 /* Flush processing any pending events before handling new ones */
2134 flush_delayed_work(&mdwc->sm_work);
2135
2136 if (mdwc->id_state == DWC3_ID_FLOAT) {
2137 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2138 set_bit(ID, &mdwc->inputs);
2139 } else {
2140 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2141 clear_bit(ID, &mdwc->inputs);
2142 }
2143
2144 if (mdwc->vbus_active && !mdwc->in_restart) {
2145 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2146 set_bit(B_SESS_VLD, &mdwc->inputs);
2147 } else {
2148 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2149 clear_bit(B_SESS_VLD, &mdwc->inputs);
2150 }
2151
2152 if (mdwc->suspend) {
2153 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2154 set_bit(B_SUSPEND, &mdwc->inputs);
2155 } else {
2156 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2157 clear_bit(B_SUSPEND, &mdwc->inputs);
2158 }
2159
2160 schedule_delayed_work(&mdwc->sm_work, 0);
2161}
2162
2163static void dwc3_resume_work(struct work_struct *w)
2164{
2165 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002166
2167 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2168
2169 /*
2170 * exit LPM first to meet resume timeline from device side.
2171 * resume_pending flag would prevent calling
2172 * dwc3_msm_resume() in case we are here due to system
2173 * wide resume without usb cable connected. This flag is set
2174 * only in case of power event irq in lpm.
2175 */
2176 if (mdwc->resume_pending) {
2177 dwc3_msm_resume(mdwc);
2178 mdwc->resume_pending = false;
2179 }
2180
Mayank Rana83ad5822016-08-09 14:17:22 -07002181 if (atomic_read(&mdwc->pm_suspended))
Mayank Rana511f3b22016-08-02 12:00:11 -07002182 /* let pm resume kick in resume work later */
2183 return;
Mayank Rana511f3b22016-08-02 12:00:11 -07002184 dwc3_ext_event_notify(mdwc);
2185}
2186
2187static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2188{
2189 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2190 u32 irq_stat, irq_clear = 0;
2191
2192 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2193 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2194
2195 /* Check for P3 events */
2196 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2197 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2198 /* Can't tell if entered or exit P3, so check LINKSTATE */
2199 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2200 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2201 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2202 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2203
2204 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2205 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2206 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2207 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2208 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2209 atomic_set(&mdwc->in_p3, 0);
2210 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2211 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2212 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2213 atomic_set(&mdwc->in_p3, 1);
2214 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2215 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2216 }
2217
2218 /* Clear L2 exit */
2219 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2220 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2221 irq_stat |= PWR_EVNT_LPM_OUT_L2_MASK;
2222 }
2223
2224 /* Handle exit from L1 events */
2225 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2226 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2227 __func__);
2228 if (usb_gadget_wakeup(&dwc->gadget))
2229 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2230 __func__);
2231 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2232 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2233 }
2234
2235 /* Unhandled events */
2236 if (irq_stat)
2237 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2238 __func__, irq_stat);
2239
2240 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2241}
2242
2243static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2244{
2245 struct dwc3_msm *mdwc = _mdwc;
2246 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2247
2248 dev_dbg(mdwc->dev, "%s\n", __func__);
2249
2250 if (atomic_read(&dwc->in_lpm))
2251 dwc3_resume_work(&mdwc->resume_work);
2252 else
2253 dwc3_pwr_event_handler(mdwc);
2254
Mayank Rana511f3b22016-08-02 12:00:11 -07002255 return IRQ_HANDLED;
2256}
2257
2258static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2259{
2260 struct dwc3_msm *mdwc = data;
2261 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2262
2263 dwc->t_pwr_evt_irq = ktime_get();
2264 dev_dbg(mdwc->dev, "%s received\n", __func__);
2265 /*
2266 * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to acertain
2267 * which interrupts have been triggered, as the clocks are disabled.
2268 * Resume controller by waking up pwr event irq thread.After re-enabling
2269 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2270 * all other power events.
2271 */
2272 if (atomic_read(&dwc->in_lpm)) {
2273 /* set this to call dwc3_msm_resume() */
2274 mdwc->resume_pending = true;
2275 return IRQ_WAKE_THREAD;
2276 }
2277
2278 dwc3_pwr_event_handler(mdwc);
2279 return IRQ_HANDLED;
2280}
2281
2282static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2283 unsigned long action, void *hcpu)
2284{
2285 uint32_t cpu = (uintptr_t)hcpu;
2286 struct dwc3_msm *mdwc =
2287 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2288
2289 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2290 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2291 cpu_to_affin, mdwc->irq_to_affin);
2292 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2293 }
2294
2295 return NOTIFY_OK;
2296}
2297
/* Forward declaration: OTG state machine body, scheduled on mdwc->sm_work */
static void dwc3_otg_sm_work(struct work_struct *w);
2299
/*
 * dwc3_msm_get_clk_gdsc - acquire clocks, core reset and the USB GDSC
 * @mdwc: driver context
 *
 * All handles are devm-managed, so nothing must be explicitly released
 * on error or at driver removal.  Optional resources (GDSC,
 * bus_aggr_clk, cfg_ahb_clk) are tolerated missing; mandatory ones
 * abort with the underlying errno.
 *
 * Return: 0 on success, negative errno on a missing mandatory resource.
 */
static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
{
	int ret;

	/* GDSC is optional: treat a failed get as "not present" */
	mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
	if (IS_ERR(mdwc->dwc3_gdsc))
		mdwc->dwc3_gdsc = NULL;

	mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
	if (IS_ERR(mdwc->xo_clk)) {
		dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
				__func__);
		ret = PTR_ERR(mdwc->xo_clk);
		return ret;
	}
	/* TCXO runs at 19.2 MHz */
	clk_set_rate(mdwc->xo_clk, 19200000);

	mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
	if (IS_ERR(mdwc->iface_clk)) {
		dev_err(mdwc->dev, "failed to get iface_clk\n");
		ret = PTR_ERR(mdwc->iface_clk);
		return ret;
	}

	/*
	 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
	 * run at 125Mhz in SSUSB mode and >60MHZ for HSUSB mode.
	 * On newer platform it can run at 150MHz as well.
	 */
	mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
	if (IS_ERR(mdwc->core_clk)) {
		dev_err(mdwc->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdwc->core_clk);
		return ret;
	}

	mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
	if (IS_ERR(mdwc->core_reset)) {
		dev_err(mdwc->dev, "failed to get core_reset\n");
		return PTR_ERR(mdwc->core_reset);
	}

	/* DT may pin the core clock rate; otherwise use the max supported */
	if (!of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
				(u32 *)&mdwc->core_clk_rate)) {
		mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
						mdwc->core_clk_rate);
	} else {
		/*
		 * Get Max supported clk frequency for USB Core CLK and request
		 * to set the same.
		 */
		mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX);
	}

	if (IS_ERR_VALUE(mdwc->core_clk_rate)) {
		/* Non-fatal: the controller may still run at a default rate */
		dev_err(mdwc->dev, "fail to get core clk max freq.\n");
	} else {
		dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
						mdwc->core_clk_rate);
		ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
		if (ret)
			dev_err(mdwc->dev, "fail to set core_clk freq:%d\n",
									ret);
	}

	mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
	if (IS_ERR(mdwc->sleep_clk)) {
		dev_err(mdwc->dev, "failed to get sleep_clk\n");
		ret = PTR_ERR(mdwc->sleep_clk);
		return ret;
	}

	clk_set_rate(mdwc->sleep_clk, 32000);
	mdwc->utmi_clk_rate = 19200000;
	mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
	if (IS_ERR(mdwc->utmi_clk)) {
		dev_err(mdwc->dev, "failed to get utmi_clk\n");
		ret = PTR_ERR(mdwc->utmi_clk);
		return ret;
	}

	clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
	/* bus_aggr_clk is optional */
	mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
	if (IS_ERR(mdwc->bus_aggr_clk))
		mdwc->bus_aggr_clk = NULL;

	/* cfg_ahb_clk is only required when listed in DT clock-names */
	if (of_property_match_string(mdwc->dev->of_node,
				"clock-names", "cfg_ahb_clk") >= 0) {
		mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
		if (IS_ERR(mdwc->cfg_ahb_clk)) {
			ret = PTR_ERR(mdwc->cfg_ahb_clk);
			mdwc->cfg_ahb_clk = NULL;
			if (ret != -EPROBE_DEFER)
				dev_err(mdwc->dev,
					"failed to get cfg_ahb_clk ret %d\n",
					ret);
			return ret;
		}
	}

	return 0;
}
2402
2403static int dwc3_msm_id_notifier(struct notifier_block *nb,
2404 unsigned long event, void *ptr)
2405{
2406 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
2407 struct extcon_dev *edev = ptr;
2408 enum dwc3_id_state id;
2409 int cc_state;
2410
2411 if (!edev) {
2412 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2413 goto done;
2414 }
2415
2416 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2417
2418 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2419
2420 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2421 if (cc_state < 0)
2422 mdwc->typec_orientation = ORIENTATION_NONE;
2423 else
2424 mdwc->typec_orientation =
2425 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2426
Mayank Rana511f3b22016-08-02 12:00:11 -07002427 if (mdwc->id_state != id) {
2428 mdwc->id_state = id;
Mayank Rana511f3b22016-08-02 12:00:11 -07002429 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2430 }
2431
2432done:
2433 return NOTIFY_DONE;
2434}
2435
2436static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2437 unsigned long event, void *ptr)
2438{
2439 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2440 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2441 struct extcon_dev *edev = ptr;
2442 int cc_state;
2443
2444 if (!edev) {
2445 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2446 goto done;
2447 }
2448
2449 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2450
2451 if (mdwc->vbus_active == event)
2452 return NOTIFY_DONE;
2453
2454 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2455 if (cc_state < 0)
2456 mdwc->typec_orientation = ORIENTATION_NONE;
2457 else
2458 mdwc->typec_orientation =
2459 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2460
Mayank Rana511f3b22016-08-02 12:00:11 -07002461 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002462 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002463 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002464done:
2465 return NOTIFY_DONE;
2466}
2467
/*
 * dwc3_msm_extcon_register - hook up VBUS and ID extcon notifiers
 * @mdwc: driver context
 *
 * The DT "extcon" property may list one or two phandles.  Note the
 * control flow: if only one phandle is present, @edev still refers to
 * the first (VBUS) extcon device when the USB-HOST notifier is
 * registered below, i.e. a single extcon device then supplies both
 * cable states.
 *
 * Return: 0 on success (including no "extcon" property at all),
 * negative errno on failure; the VBUS notifier is unregistered on the
 * error path.
 */
static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
{
	struct device_node *node = mdwc->dev->of_node;
	struct extcon_dev *edev;
	int ret = 0;

	/* extcon support is optional */
	if (!of_property_read_bool(node, "extcon"))
		return 0;

	edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
	/* -ENODEV means the phandle target has no extcon; tolerated */
	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
		return PTR_ERR(edev);

	if (!IS_ERR(edev)) {
		mdwc->extcon_vbus = edev;
		mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB,
				&mdwc->vbus_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB\n");
			return ret;
		}
	}

	/* if a second phandle was provided, use it to get a separate edev */
	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
		edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
			ret = PTR_ERR(edev);
			goto err;
		}
	}

	if (!IS_ERR(edev)) {
		mdwc->extcon_id = edev;
		mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
				&mdwc->id_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
			goto err;
		}
	}

	return 0;
err:
	/* Undo the VBUS registration done above, if any */
	if (mdwc->extcon_vbus)
		extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
				&mdwc->vbus_nb);
	return ret;
}
2519
2520static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
2521 char *buf)
2522{
2523 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2524
2525 if (mdwc->vbus_active)
2526 return snprintf(buf, PAGE_SIZE, "peripheral\n");
2527 if (mdwc->id_state == DWC3_ID_GROUND)
2528 return snprintf(buf, PAGE_SIZE, "host\n");
2529
2530 return snprintf(buf, PAGE_SIZE, "none\n");
2531}
2532
2533static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
2534 const char *buf, size_t count)
2535{
2536 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2537
2538 if (sysfs_streq(buf, "peripheral")) {
2539 mdwc->vbus_active = true;
2540 mdwc->id_state = DWC3_ID_FLOAT;
2541 } else if (sysfs_streq(buf, "host")) {
2542 mdwc->vbus_active = false;
2543 mdwc->id_state = DWC3_ID_GROUND;
2544 } else {
2545 mdwc->vbus_active = false;
2546 mdwc->id_state = DWC3_ID_FLOAT;
2547 }
2548
2549 dwc3_ext_event_notify(mdwc);
2550
2551 return count;
2552}
2553
/* Sysfs attribute "mode" (none/peripheral/host); see mode_show()/mode_store() */
static DEVICE_ATTR_RW(mode);
2555
2556static int dwc3_msm_probe(struct platform_device *pdev)
2557{
2558 struct device_node *node = pdev->dev.of_node, *dwc3_node;
2559 struct device *dev = &pdev->dev;
2560 struct dwc3_msm *mdwc;
2561 struct dwc3 *dwc;
2562 struct resource *res;
2563 void __iomem *tcsr;
2564 bool host_mode;
2565 int ret = 0;
2566 int ext_hub_reset_gpio;
2567 u32 val;
2568
2569 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
2570 if (!mdwc)
2571 return -ENOMEM;
2572
2573 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
2574 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
2575 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2576 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
2577 return -EOPNOTSUPP;
2578 }
2579 }
2580
2581 platform_set_drvdata(pdev, mdwc);
2582 mdwc->dev = &pdev->dev;
2583
2584 INIT_LIST_HEAD(&mdwc->req_complete_list);
2585 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
2586 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
2587 INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07002588 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002589 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
2590
2591 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
2592 if (!mdwc->dwc3_wq) {
2593 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
2594 return -ENOMEM;
2595 }
2596
2597 /* Get all clks and gdsc reference */
2598 ret = dwc3_msm_get_clk_gdsc(mdwc);
2599 if (ret) {
2600 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
2601 return ret;
2602 }
2603
2604 mdwc->id_state = DWC3_ID_FLOAT;
2605 set_bit(ID, &mdwc->inputs);
2606
2607 mdwc->charging_disabled = of_property_read_bool(node,
2608 "qcom,charging-disabled");
2609
2610 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
2611 &mdwc->lpm_to_suspend_delay);
2612 if (ret) {
2613 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
2614 mdwc->lpm_to_suspend_delay = 0;
2615 }
2616
2617 /*
2618 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
2619 * DP and DM linestate transitions during low power mode.
2620 */
2621 mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
2622 if (mdwc->hs_phy_irq < 0) {
2623 dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
2624 ret = -EINVAL;
2625 goto err;
2626 } else {
2627 irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
2628 ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
2629 msm_dwc3_pwr_irq,
2630 msm_dwc3_pwr_irq_thread,
2631 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2632 | IRQF_ONESHOT, "hs_phy_irq", mdwc);
2633 if (ret) {
2634 dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
2635 ret);
2636 goto err;
2637 }
2638 }
2639
2640 mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
2641 if (mdwc->ss_phy_irq < 0) {
2642 dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
2643 } else {
2644 irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
2645 ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
2646 msm_dwc3_pwr_irq,
2647 msm_dwc3_pwr_irq_thread,
2648 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2649 | IRQF_ONESHOT, "ss_phy_irq", mdwc);
2650 if (ret) {
2651 dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
2652 ret);
2653 goto err;
2654 }
2655 }
2656
2657 mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
2658 if (mdwc->pwr_event_irq < 0) {
2659 dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
2660 ret = -EINVAL;
2661 goto err;
2662 } else {
2663 /* will be enabled in dwc3_msm_resume() */
2664 irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
2665 ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
2666 msm_dwc3_pwr_irq,
2667 msm_dwc3_pwr_irq_thread,
2668 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
2669 "msm_dwc3", mdwc);
2670 if (ret) {
2671 dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
2672 ret);
2673 goto err;
2674 }
2675 }
2676
2677 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
2678 if (!res) {
2679 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
2680 } else {
2681 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
2682 resource_size(res));
2683 if (IS_ERR_OR_NULL(tcsr)) {
2684 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
2685 } else {
2686 /* Enable USB3 on the primary USB port. */
2687 writel_relaxed(0x1, tcsr);
2688 /*
2689 * Ensure that TCSR write is completed before
2690 * USB registers initialization.
2691 */
2692 mb();
2693 }
2694 }
2695
2696 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
2697 if (!res) {
2698 dev_err(&pdev->dev, "missing memory base resource\n");
2699 ret = -ENODEV;
2700 goto err;
2701 }
2702
2703 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
2704 resource_size(res));
2705 if (!mdwc->base) {
2706 dev_err(&pdev->dev, "ioremap failed\n");
2707 ret = -ENODEV;
2708 goto err;
2709 }
2710
2711 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2712 "ahb2phy_base");
2713 if (res) {
2714 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
2715 res->start, resource_size(res));
2716 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
2717 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
2718 mdwc->ahb2phy_base = NULL;
2719 } else {
2720 /*
2721 * On some targets cfg_ahb_clk depends upon usb gdsc
2722 * regulator. If cfg_ahb_clk is enabled without
2723 * turning on usb gdsc regulator clk is stuck off.
2724 */
2725 dwc3_msm_config_gdsc(mdwc, 1);
2726 clk_prepare_enable(mdwc->cfg_ahb_clk);
2727 /* Configure AHB2PHY for one wait state read/write*/
2728 val = readl_relaxed(mdwc->ahb2phy_base +
2729 PERIPH_SS_AHB2PHY_TOP_CFG);
2730 if (val != ONE_READ_WRITE_WAIT) {
2731 writel_relaxed(ONE_READ_WRITE_WAIT,
2732 mdwc->ahb2phy_base +
2733 PERIPH_SS_AHB2PHY_TOP_CFG);
2734 /* complete above write before using USB PHY */
2735 mb();
2736 }
2737 clk_disable_unprepare(mdwc->cfg_ahb_clk);
2738 dwc3_msm_config_gdsc(mdwc, 0);
2739 }
2740 }
2741
2742 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
2743 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
2744 if (IS_ERR(mdwc->dbm)) {
2745 dev_err(&pdev->dev, "unable to get dbm device\n");
2746 ret = -EPROBE_DEFER;
2747 goto err;
2748 }
2749 /*
2750 * Add power event if the dbm indicates coming out of L1
2751 * by interrupt
2752 */
2753 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
2754 if (!mdwc->pwr_event_irq) {
2755 dev_err(&pdev->dev,
2756 "need pwr_event_irq exiting L1\n");
2757 ret = -EINVAL;
2758 goto err;
2759 }
2760 }
2761 }
2762
2763 ext_hub_reset_gpio = of_get_named_gpio(node,
2764 "qcom,ext-hub-reset-gpio", 0);
2765
2766 if (gpio_is_valid(ext_hub_reset_gpio)
2767 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
2768 "qcom,ext-hub-reset-gpio"))) {
2769 /* reset external hub */
2770 gpio_direction_output(ext_hub_reset_gpio, 1);
2771 /*
2772 * Hub reset should be asserted for minimum 5microsec
2773 * before deasserting.
2774 */
2775 usleep_range(5, 1000);
2776 gpio_direction_output(ext_hub_reset_gpio, 0);
2777 }
2778
2779 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
2780 &mdwc->tx_fifo_size))
2781 dev_err(&pdev->dev,
2782 "unable to read platform data tx fifo size\n");
2783
2784 mdwc->disable_host_mode_pm = of_property_read_bool(node,
2785 "qcom,disable-host-mode-pm");
2786
2787 dwc3_set_notifier(&dwc3_msm_notify_event);
2788
2789 /* Assumes dwc3 is the first DT child of dwc3-msm */
2790 dwc3_node = of_get_next_available_child(node, NULL);
2791 if (!dwc3_node) {
2792 dev_err(&pdev->dev, "failed to find dwc3 child\n");
2793 ret = -ENODEV;
2794 goto err;
2795 }
2796
2797 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2798 if (ret) {
2799 dev_err(&pdev->dev,
2800 "failed to add create dwc3 core\n");
2801 of_node_put(dwc3_node);
2802 goto err;
2803 }
2804
2805 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
2806 of_node_put(dwc3_node);
2807 if (!mdwc->dwc3) {
2808 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
2809 goto put_dwc3;
2810 }
2811
2812 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
2813 "usb-phy", 0);
2814 if (IS_ERR(mdwc->hs_phy)) {
2815 dev_err(&pdev->dev, "unable to get hsphy device\n");
2816 ret = PTR_ERR(mdwc->hs_phy);
2817 goto put_dwc3;
2818 }
2819 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
2820 "usb-phy", 1);
2821 if (IS_ERR(mdwc->ss_phy)) {
2822 dev_err(&pdev->dev, "unable to get ssphy device\n");
2823 ret = PTR_ERR(mdwc->ss_phy);
2824 goto put_dwc3;
2825 }
2826
2827 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
2828 if (mdwc->bus_scale_table) {
2829 mdwc->bus_perf_client =
2830 msm_bus_scale_register_client(mdwc->bus_scale_table);
2831 }
2832
2833 dwc = platform_get_drvdata(mdwc->dwc3);
2834 if (!dwc) {
2835 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
2836 goto put_dwc3;
2837 }
2838
2839 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
2840 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
2841
2842 if (cpu_to_affin)
2843 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
2844
2845 /*
2846 * Clocks and regulators will not be turned on until the first time
2847 * runtime PM resume is called. This is to allow for booting up with
2848 * charger already connected so as not to disturb PHY line states.
2849 */
2850 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
2851 atomic_set(&dwc->in_lpm, 1);
2852 pm_runtime_set_suspended(mdwc->dev);
2853 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
2854 pm_runtime_use_autosuspend(mdwc->dev);
2855 pm_runtime_enable(mdwc->dev);
2856 device_init_wakeup(mdwc->dev, 1);
2857
2858 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
2859 pm_runtime_get_noresume(mdwc->dev);
2860
2861 ret = dwc3_msm_extcon_register(mdwc);
2862 if (ret)
2863 goto put_dwc3;
2864
2865 /* Update initial VBUS/ID state from extcon */
2866 if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
2867 EXTCON_USB))
2868 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
2869 if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
2870 EXTCON_USB_HOST))
2871 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
2872
2873 device_create_file(&pdev->dev, &dev_attr_mode);
2874
2875 schedule_delayed_work(&mdwc->sm_work, 0);
2876
2877 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
2878 if (!dwc->is_drd && host_mode) {
2879 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
2880 mdwc->id_state = DWC3_ID_GROUND;
2881 dwc3_ext_event_notify(mdwc);
2882 }
2883
2884 return 0;
2885
2886put_dwc3:
2887 platform_device_put(mdwc->dwc3);
2888 if (mdwc->bus_perf_client)
2889 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
2890err:
2891 return ret;
2892}
2893
/*
 * dwc3_msm_remove_children - device_for_each_child() callback used by
 * dwc3_msm_remove() to unregister every child device.  Always returns 0
 * so iteration continues over all children.
 */
static int dwc3_msm_remove_children(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}
2899
2900static int dwc3_msm_remove(struct platform_device *pdev)
2901{
2902 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
2903 int ret_pm;
2904
2905 device_remove_file(&pdev->dev, &dev_attr_mode);
2906
2907 if (cpu_to_affin)
2908 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
2909
2910 /*
2911 * In case of system suspend, pm_runtime_get_sync fails.
2912 * Hence turn ON the clocks manually.
2913 */
2914 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07002915 if (ret_pm < 0) {
2916 dev_err(mdwc->dev,
2917 "pm_runtime_get_sync failed with %d\n", ret_pm);
2918 clk_prepare_enable(mdwc->utmi_clk);
2919 clk_prepare_enable(mdwc->core_clk);
2920 clk_prepare_enable(mdwc->iface_clk);
2921 clk_prepare_enable(mdwc->sleep_clk);
2922 if (mdwc->bus_aggr_clk)
2923 clk_prepare_enable(mdwc->bus_aggr_clk);
2924 clk_prepare_enable(mdwc->xo_clk);
2925 }
2926
2927 cancel_delayed_work_sync(&mdwc->sm_work);
2928
2929 if (mdwc->hs_phy)
2930 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
2931 platform_device_put(mdwc->dwc3);
2932 device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
2933
Mayank Rana511f3b22016-08-02 12:00:11 -07002934 pm_runtime_disable(mdwc->dev);
2935 pm_runtime_barrier(mdwc->dev);
2936 pm_runtime_put_sync(mdwc->dev);
2937 pm_runtime_set_suspended(mdwc->dev);
2938 device_wakeup_disable(mdwc->dev);
2939
2940 if (mdwc->bus_perf_client)
2941 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
2942
2943 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
2944 regulator_disable(mdwc->vbus_reg);
2945
2946 disable_irq(mdwc->hs_phy_irq);
2947 if (mdwc->ss_phy_irq)
2948 disable_irq(mdwc->ss_phy_irq);
2949 disable_irq(mdwc->pwr_event_irq);
2950
2951 clk_disable_unprepare(mdwc->utmi_clk);
2952 clk_set_rate(mdwc->core_clk, 19200000);
2953 clk_disable_unprepare(mdwc->core_clk);
2954 clk_disable_unprepare(mdwc->iface_clk);
2955 clk_disable_unprepare(mdwc->sleep_clk);
2956 clk_disable_unprepare(mdwc->xo_clk);
2957 clk_put(mdwc->xo_clk);
2958
2959 dwc3_msm_config_gdsc(mdwc, 0);
2960
2961 return 0;
2962}
2963
/* 1000 ms expressed in jiffies; users of this delay are outside this view */
#define VBUS_REG_CHECK_DELAY	(msecs_to_jiffies(1000))
2965
2966/**
2967 * dwc3_otg_start_host - helper function for starting/stoping the host
2968 * controller driver.
2969 *
2970 * @mdwc: Pointer to the dwc3_msm structure.
2971 * @on: start / stop the host controller driver.
2972 *
2973 * Returns 0 on success otherwise negative errno.
2974 */
2975static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
2976{
2977 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2978 int ret = 0;
2979
2980 if (!dwc->xhci)
2981 return -EINVAL;
2982
2983 /*
2984 * The vbus_reg pointer could have multiple values
2985 * NULL: regulator_get() hasn't been called, or was previously deferred
2986 * IS_ERR: regulator could not be obtained, so skip using it
2987 * Valid pointer otherwise
2988 */
2989 if (!mdwc->vbus_reg) {
2990 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
2991 "vbus_dwc3");
2992 if (IS_ERR(mdwc->vbus_reg) &&
2993 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
2994 /* regulators may not be ready, so retry again later */
2995 mdwc->vbus_reg = NULL;
2996 return -EPROBE_DEFER;
2997 }
2998 }
2999
3000 if (on) {
3001 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
3002
3003 pm_runtime_get_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003004 mdwc->hs_phy->flags |= PHY_HOST_MODE;
3005 mdwc->ss_phy->flags |= PHY_HOST_MODE;
3006 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3007 if (!IS_ERR(mdwc->vbus_reg))
3008 ret = regulator_enable(mdwc->vbus_reg);
3009 if (ret) {
3010 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
3011 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3012 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3013 pm_runtime_put_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003014 return ret;
3015 }
3016
3017 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
3018
3019 /*
3020 * FIXME If micro A cable is disconnected during system suspend,
3021 * xhci platform device will be removed before runtime pm is
3022 * enabled for xhci device. Due to this, disable_depth becomes
3023 * greater than one and runtimepm is not enabled for next microA
3024 * connect. Fix this by calling pm_runtime_init for xhci device.
3025 */
3026 pm_runtime_init(&dwc->xhci->dev);
3027 ret = platform_device_add(dwc->xhci);
3028 if (ret) {
3029 dev_err(mdwc->dev,
3030 "%s: failed to add XHCI pdev ret=%d\n",
3031 __func__, ret);
3032 if (!IS_ERR(mdwc->vbus_reg))
3033 regulator_disable(mdwc->vbus_reg);
3034 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3035 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3036 pm_runtime_put_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003037 return ret;
3038 }
3039
3040 /*
3041 * In some cases it is observed that USB PHY is not going into
3042 * suspend with host mode suspend functionality. Hence disable
3043 * XHCI's runtime PM here if disable_host_mode_pm is set.
3044 */
3045 if (mdwc->disable_host_mode_pm)
3046 pm_runtime_disable(&dwc->xhci->dev);
3047
3048 mdwc->in_host_mode = true;
3049 dwc3_usb3_phy_suspend(dwc, true);
3050
3051 /* xHCI should have incremented child count as necessary */
Mayank Rana511f3b22016-08-02 12:00:11 -07003052 pm_runtime_mark_last_busy(mdwc->dev);
3053 pm_runtime_put_sync_autosuspend(mdwc->dev);
3054 } else {
3055 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3056
3057 if (!IS_ERR(mdwc->vbus_reg))
3058 ret = regulator_disable(mdwc->vbus_reg);
3059 if (ret) {
3060 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3061 return ret;
3062 }
3063
3064 pm_runtime_get_sync(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003065 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3066 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3067 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3068 platform_device_del(dwc->xhci);
3069
3070 /*
3071 * Perform USB hardware RESET (both core reset and DBM reset)
3072 * when moving from host to peripheral. This is required for
3073 * peripheral mode to work.
3074 */
3075 dwc3_msm_block_reset(mdwc, true);
3076
3077 dwc3_usb3_phy_suspend(dwc, false);
3078 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3079
3080 mdwc->in_host_mode = false;
3081
3082 /* re-init core and OTG registers as block reset clears these */
3083 dwc3_post_host_reset_core_init(dwc);
3084 pm_runtime_mark_last_busy(mdwc->dev);
3085 pm_runtime_put_sync_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003086 }
3087
3088 return 0;
3089}
3090
/*
 * dwc3_override_vbus_status - force the controller's VBUS-valid inputs.
 * @mdwc: glue-layer context
 * @vbus_present: true to report VBUS as present to the core, false otherwise
 *
 * Writes the QSCRATCH override bits so the controller sees session-valid /
 * power-present state regardless of what the PHYs report.
 */
static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	/* Update OTG VBUS Valid from HSPHY to controller */
	dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
		vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
		UTMI_OTG_VBUS_VALID,
		vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);

	/* Update only if Super Speed is supported */
	if (dwc->maximum_speed == USB_SPEED_SUPER) {
		/* Update VBUS Valid from SSPHY to controller */
		dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
			LANE0_PWR_PRESENT,
			vbus_present ? LANE0_PWR_PRESENT : 0);
	}
}
3109
/**
 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
 *
 * @mdwc: Pointer to the dwc3_msm structure.
 * @on: Turn ON/OFF the gadget.
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	/* Keep the core resumed for the whole (dis)connect sequence */
	pm_runtime_get_sync(mdwc->dev);

	if (on) {
		dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
					__func__, dwc->gadget.name);

		/* Report VBUS to the core before notifying the PHYs */
		dwc3_override_vbus_status(mdwc, true);
		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
		usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);

		/*
		 * Core reset is not required during start peripheral. Only
		 * DBM reset is required, hence perform only DBM reset here.
		 */
		dwc3_msm_block_reset(mdwc, false);

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
		usb_gadget_vbus_connect(&dwc->gadget);
	} else {
		dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
					__func__, dwc->gadget.name);
		/* Tear down in the reverse order of the connect path */
		usb_gadget_vbus_disconnect(&dwc->gadget);
		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
		usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
		dwc3_override_vbus_status(mdwc, false);
		dwc3_usb3_phy_suspend(dwc, false);
	}

	pm_runtime_put_sync(mdwc->dev);

	return 0;
}
3154
3155static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3156{
Jack Pham8caff352016-08-19 16:33:55 -07003157 union power_supply_propval pval = {0};
Jack Phamd72bafe2016-08-09 11:07:22 -07003158 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07003159
3160 if (mdwc->charging_disabled)
3161 return 0;
3162
3163 if (mdwc->max_power == mA)
3164 return 0;
3165
3166 if (!mdwc->usb_psy) {
3167 mdwc->usb_psy = power_supply_get_by_name("usb");
3168 if (!mdwc->usb_psy) {
3169 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3170 return -ENODEV;
3171 }
3172 }
3173
Jack Pham8caff352016-08-19 16:33:55 -07003174 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_TYPE, &pval);
3175 if (pval.intval != POWER_SUPPLY_TYPE_USB)
3176 return 0;
3177
Mayank Rana511f3b22016-08-02 12:00:11 -07003178 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3179
Mayank Rana511f3b22016-08-02 12:00:11 -07003180 /* Set max current limit in uA */
Jack Pham8caff352016-08-19 16:33:55 -07003181 pval.intval = 1000 * mA;
Jack Phamd72bafe2016-08-09 11:07:22 -07003182 ret = power_supply_set_property(mdwc->usb_psy,
3183 POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
3184 if (ret) {
3185 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3186 return ret;
3187 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003188
3189 mdwc->max_power = mA;
3190 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003191}
3192
3193
/**
 * dwc3_otg_sm_work - workqueue function.
 *
 * @w: Pointer to the dwc3 otg workqueue
 *
 * NOTE: After any change in otg_state, we must reschedule the state machine.
 */
static void dwc3_otg_sm_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
	struct dwc3 *dwc = NULL;
	bool work = 0;
	int ret = 0;
	unsigned long delay = 0;
	const char *state;

	if (mdwc->dwc3)
		dwc = platform_get_drvdata(mdwc->dwc3);

	if (!dwc) {
		dev_err(mdwc->dev, "dwc is NULL.\n");
		return;
	}

	state = usb_otg_state_string(mdwc->otg_state);
	dev_dbg(mdwc->dev, "%s state\n", state);

	/* Check OTG state */
	switch (mdwc->otg_state) {
	case OTG_STATE_UNDEFINED:
		/* Do nothing if no cable connected */
		if (test_bit(ID, &mdwc->inputs) &&
				!test_bit(B_SESS_VLD, &mdwc->inputs))
			break;

		mdwc->otg_state = OTG_STATE_B_IDLE;
		/* fall-through */
	case OTG_STATE_B_IDLE:
		if (!test_bit(ID, &mdwc->inputs)) {
			/* ID grounded: switch to A-device (host) role */
			dev_dbg(mdwc->dev, "!id\n");
			mdwc->otg_state = OTG_STATE_A_IDLE;
			work = 1;
		} else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "b_sess_vld\n");
			/*
			 * Increment pm usage count upon cable connect. Count
			 * is decremented in OTG_STATE_B_PERIPHERAL state on
			 * cable disconnect or in bus suspend.
			 */
			pm_runtime_get_sync(mdwc->dev);
			dwc3_otg_start_peripheral(mdwc, 1);
			mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
			work = 1;
		} else {
			/* No cable: drop any previous current request */
			dwc3_msm_gadget_vbus_draw(mdwc, 0);
			dev_dbg(mdwc->dev, "Cable disconnected\n");
		}
		break;

	case OTG_STATE_B_PERIPHERAL:
		if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
				!test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "!id || !bsv\n");
			mdwc->otg_state = OTG_STATE_B_IDLE;
			dwc3_otg_start_peripheral(mdwc, 0);
			/*
			 * Decrement pm usage count upon cable disconnect
			 * which was incremented upon cable connect in
			 * OTG_STATE_B_IDLE state
			 */
			pm_runtime_put_sync(mdwc->dev);
			work = 1;
		} else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
			test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BPER bsv && susp\n");
			mdwc->otg_state = OTG_STATE_B_SUSPEND;
			/*
			 * Decrement pm usage count upon bus suspend.
			 * Count was incremented either upon cable
			 * connect in OTG_STATE_B_IDLE or host
			 * initiated resume after bus suspend in
			 * OTG_STATE_B_SUSPEND state
			 */
			pm_runtime_mark_last_busy(mdwc->dev);
			pm_runtime_put_autosuspend(mdwc->dev);
		}
		break;

	case OTG_STATE_B_SUSPEND:
		if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
			mdwc->otg_state = OTG_STATE_B_IDLE;
			dwc3_otg_start_peripheral(mdwc, 0);
		} else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BSUSP !susp\n");
			mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
			/*
			 * Increment pm usage count upon host
			 * initiated resume. Count was decremented
			 * upon bus suspend in
			 * OTG_STATE_B_PERIPHERAL state.
			 */
			pm_runtime_get_sync(mdwc->dev);
		}
		break;

	case OTG_STATE_A_IDLE:
		/* Switch to A-Device*/
		if (test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "id\n");
			mdwc->otg_state = OTG_STATE_B_IDLE;
			mdwc->vbus_retry_count = 0;
			work = 1;
		} else {
			mdwc->otg_state = OTG_STATE_A_HOST;
			ret = dwc3_otg_start_host(mdwc, 1);
			if ((ret == -EPROBE_DEFER) &&
						mdwc->vbus_retry_count < 3) {
				/*
				 * Get regulator failed as regulator driver is
				 * not up yet. Will try to start host after 1sec
				 */
				mdwc->otg_state = OTG_STATE_A_IDLE;
				dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
				delay = VBUS_REG_CHECK_DELAY;
				work = 1;
				mdwc->vbus_retry_count++;
			} else if (ret) {
				dev_err(mdwc->dev, "unable to start host\n");
				mdwc->otg_state = OTG_STATE_A_IDLE;
				goto ret;
			}
		}
		break;

	case OTG_STATE_A_HOST:
		if (test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "id\n");
			dwc3_otg_start_host(mdwc, 0);
			mdwc->otg_state = OTG_STATE_B_IDLE;
			mdwc->vbus_retry_count = 0;
			work = 1;
		} else {
			dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
			/*
			 * NOTE(review): dwc was already verified non-NULL at
			 * the top of this function, so this check is
			 * redundant (kept to preserve behavior).
			 */
			if (dwc)
				pm_runtime_resume(&dwc->xhci->dev);
		}
		break;

	default:
		dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);

	}

	if (work)
		schedule_delayed_work(&mdwc->sm_work, delay);

ret:
	return;
}
3354
3355#ifdef CONFIG_PM_SLEEP
/*
 * dwc3_msm_pm_suspend - system-sleep suspend callback.
 *
 * Refuses to suspend (-EBUSY) while the controller is still active, i.e.
 * not yet in low power mode; otherwise performs the glue-layer suspend and
 * records pm_suspended so the wakeup paths know system sleep is in effect.
 */
static int dwc3_msm_pm_suspend(struct device *dev)
{
	int ret = 0;
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(dev, "dwc3-msm PM suspend\n");

	/* Drain pending work before sampling the LPM state */
	flush_workqueue(mdwc->dwc3_wq);
	if (!atomic_read(&dwc->in_lpm)) {
		dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
		return -EBUSY;
	}

	ret = dwc3_msm_suspend(mdwc);
	if (!ret)
		atomic_set(&mdwc->pm_suspended, 1);

	return ret;
}
3376
/*
 * dwc3_msm_pm_resume - system-sleep resume callback.
 *
 * Clears pm_suspended and kicks the resume work so the OTG state machine
 * re-evaluates cable state after waking up. Always returns 0.
 */
static int dwc3_msm_pm_resume(struct device *dev)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dev);

	dev_dbg(dev, "dwc3-msm PM resume\n");

	/* flush to avoid race in read/write of pm_suspended */
	flush_workqueue(mdwc->dwc3_wq);
	atomic_set(&mdwc->pm_suspended, 0);

	/* kick in otg state machine */
	queue_work(mdwc->dwc3_wq, &mdwc->resume_work);

	return 0;
}
3392#endif
3393
3394#ifdef CONFIG_PM
/* Runtime-PM idle callback: returning 0 lets the suspend proceed. */
static int dwc3_msm_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "DWC3-msm runtime idle\n");

	return 0;
}
3401
/* Runtime-PM suspend callback: defer to the glue-layer suspend path. */
static int dwc3_msm_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "DWC3-msm runtime suspend\n");

	return dwc3_msm_suspend(dev_get_drvdata(dev));
}
3410
/* Runtime-PM resume callback: defer to the glue-layer resume path. */
static int dwc3_msm_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "DWC3-msm runtime resume\n");

	return dwc3_msm_resume(dev_get_drvdata(dev));
}
3419#endif
3420
/* System-sleep and runtime PM callbacks for the msm-dwc3 platform device */
static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
	SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
				dwc3_msm_runtime_idle)
};
3426
3427static const struct of_device_id of_dwc3_matach[] = {
3428 {
3429 .compatible = "qcom,dwc-usb3-msm",
3430 },
3431 { },
3432};
3433MODULE_DEVICE_TABLE(of, of_dwc3_matach);
3434
3435static struct platform_driver dwc3_msm_driver = {
3436 .probe = dwc3_msm_probe,
3437 .remove = dwc3_msm_remove,
3438 .driver = {
3439 .name = "msm-dwc3",
3440 .pm = &dwc3_msm_dev_pm_ops,
3441 .of_match_table = of_dwc3_matach,
3442 },
3443};
3444
3445MODULE_LICENSE("GPL v2");
3446MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
3447
/*
 * Register/unregister the platform driver. module_platform_driver()
 * generates the init/exit pair with the proper __init/__exit section
 * annotations (the open-coded init function was missing __init, keeping
 * its text resident after boot).
 */
module_platform_driver(dwc3_msm_driver);