blob: 57db6d983443f03e39ecc3cdfca6cbfac58aafe1 [file] [log] [blame]
Mayank Rana511f3b22016-08-02 12:00:11 -07001/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/clk.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/delay.h>
30#include <linux/of.h>
31#include <linux/of_platform.h>
32#include <linux/of_gpio.h>
33#include <linux/list.h>
34#include <linux/uaccess.h>
35#include <linux/usb/ch9.h>
36#include <linux/usb/gadget.h>
37#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070038#include <linux/regulator/consumer.h>
39#include <linux/pm_wakeup.h>
40#include <linux/power_supply.h>
41#include <linux/cdev.h>
42#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070043#include <linux/msm-bus.h>
44#include <linux/irq.h>
45#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053046#include <linux/reset.h>
Hemant Kumar633dc332016-08-10 13:41:05 -070047#include <linux/clk/qcom.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070048
49#include "power.h"
50#include "core.h"
51#include "gadget.h"
52#include "dbm.h"
53#include "debug.h"
54#include "xhci.h"
55
56/* time out to wait for USB cable status notification (in ms)*/
57#define SM_INIT_TIMEOUT 30000
58
59/* AHB2PHY register offsets */
60#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
61
/* AHB2PHY read/write wait value */
63#define ONE_READ_WRITE_WAIT 0x11
64
65/* cpu to fix usb interrupt */
66static int cpu_to_affin;
67module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
68MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
69
70/* XHCI registers */
71#define USB3_HCSPARAMS1 (0x4)
72#define USB3_PORTSC (0x420)
73
74/**
75 * USB QSCRATCH Hardware registers
76 *
77 */
78#define QSCRATCH_REG_OFFSET (0x000F8800)
79#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
80#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
81#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
82#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
83
84#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
85#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
86#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
87#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
88#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
89
90/* QSCRATCH_GENERAL_CFG register bit offset */
91#define PIPE_UTMI_CLK_SEL BIT(0)
92#define PIPE3_PHYSTATUS_SW BIT(3)
93#define PIPE_UTMI_CLK_DIS BIT(8)
94
95#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
96#define UTMI_OTG_VBUS_VALID BIT(20)
97#define SW_SESSVLD_SEL BIT(28)
98
99#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
100#define LANE0_PWR_PRESENT BIT(24)
101
102/* GSI related registers */
103#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
104#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
105
106#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
107#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
108#define GSI_CLK_EN_MASK BIT(12)
109#define BLOCK_GSI_WR_GO_MASK BIT(1)
110#define GSI_EN_MASK BIT(0)
111
112#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
113#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
114#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
115#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
116
117#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
118#define GSI_WR_CTRL_STATE_MASK BIT(15)
119
/*
 * Bookkeeping node pairing a queued usb_request with its original
 * ->complete callback while dwc3_msm_req_complete_func() is substituted
 * in for DBM-mode requests.  Nodes live on dwc3_msm::req_complete_list.
 */
struct dwc3_msm_req_complete {
	struct list_head list_item;	/* link on mdwc->req_complete_list */
	struct usb_request *req;	/* the request whose completion we wrap */
	void (*orig_complete)(struct usb_ep *ep,
			      struct usb_request *req); /* saved callback */
};
126
/* State of the USB ID pin: grounded selects host role, floating peripheral */
enum dwc3_id_state {
	DWC3_ID_GROUND = 0,	/* ID grounded: act as host (OTG A-device) */
	DWC3_ID_FLOAT,		/* ID floating: act as peripheral (B-device) */
};
131
/* Type-C cable plug orientation, reported via the CC lines */
enum plug_orientation {
	ORIENTATION_NONE,	/* unknown / not a Type-C connection */
	ORIENTATION_CC1,	/* cable detected on CC1 */
	ORIENTATION_CC2,	/* cable detected on CC2 */
};
138
139/* Input bits to state machine (mdwc->inputs) */
140
141#define ID 0
142#define B_SESS_VLD 1
143#define B_SUSPEND 2
144
/*
 * Per-instance state of the MSM glue layer wrapping the DWC3 core.
 * One of these is allocated per controller; the child platform device
 * (->dwc3) is the generic DWC3 core whose drvdata parent points back here.
 */
struct dwc3_msm {
	struct device *dev;
	void __iomem *base;		/* DWC3 core + QSCRATCH register space */
	void __iomem *ahb2phy_base;	/* AHB2PHY bridge registers */
	struct platform_device *dwc3;	/* child DWC3 core device */
	/* original EP ops saved before DBM/GSI override */
	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
	struct list_head req_complete_list; /* dwc3_msm_req_complete nodes */
	struct clk *xo_clk;
	struct clk *core_clk;
	long core_clk_rate;
	struct clk *iface_clk;
	struct clk *sleep_clk;
	struct clk *utmi_clk;
	unsigned int utmi_clk_rate;
	struct clk *utmi_clk_src;
	struct clk *bus_aggr_clk;
	struct clk *noc_aggr_clk;
	struct clk *cfg_ahb_clk;
	struct reset_control *core_reset;
	struct regulator *dwc3_gdsc;	/* controller power domain supply */

	struct usb_phy *hs_phy, *ss_phy;

	struct dbm *dbm;		/* Device Bus Manager handle */

	/* VBUS regulator for host mode */
	struct regulator *vbus_reg;
	int vbus_retry_count;
	bool resume_pending;
	atomic_t pm_suspended;		/* set while system PM suspended */
	int hs_phy_irq;
	int ss_phy_irq;
	struct work_struct resume_work;
	struct work_struct restart_usb_work;
	bool in_restart;
	struct workqueue_struct *dwc3_wq;
	struct delayed_work sm_work;	/* OTG state machine work */
	unsigned long inputs;		/* ID/B_SESS_VLD/B_SUSPEND bits */
	unsigned int max_power;		/* negotiated current draw (mA) */
	bool charging_disabled;
	enum usb_otg_state otg_state;
	struct work_struct bus_vote_w;
	unsigned int bus_vote;
	u32 bus_perf_client;		/* msm_bus scaling client handle */
	struct msm_bus_scale_pdata *bus_scale_table;
	struct power_supply *usb_psy;
	struct work_struct vbus_draw_work;
	bool in_host_mode;
	unsigned int tx_fifo_size;
	bool vbus_active;
	bool suspend;
	bool disable_host_mode_pm;
	enum dwc3_id_state id_state;
	unsigned long lpm_flags;	/* low-power-mode bookkeeping, below */
#define MDWC3_SS_PHY_SUSPEND BIT(0)
#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY BIT(1)
#define MDWC3_POWER_COLLAPSE BIT(2)

	unsigned int irq_to_affin;	/* IRQ pinned to cpu_to_affin */
	struct notifier_block dwc3_cpu_notifier;

	struct extcon_dev *extcon_vbus;
	struct extcon_dev *extcon_id;
	struct notifier_block vbus_nb;
	struct notifier_block id_nb;

	int pwr_event_irq;
	atomic_t in_p3;			/* SS PHY currently in P3 power state */
	unsigned int lpm_to_suspend_delay;
	bool init;
	enum plug_orientation typec_orientation;
};
217
218#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
219#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
220#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
221
222#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
223#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
224#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
225
226#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
227#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
228#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
229
230#define DSTS_CONNECTSPD_SS 0x4
231
232
233static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
234static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
235
236/**
237 *
238 * Read register with debug info.
239 *
240 * @base - DWC3 base virtual address.
241 * @offset - register offset.
242 *
243 * @return u32
244 */
245static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
246{
247 u32 val = ioread32(base + offset);
248 return val;
249}
250
251/**
252 * Read register masked field with debug info.
253 *
254 * @base - DWC3 base virtual address.
255 * @offset - register offset.
256 * @mask - register bitmask.
257 *
258 * @return u32
259 */
260static inline u32 dwc3_msm_read_reg_field(void *base,
261 u32 offset,
262 const u32 mask)
263{
264 u32 shift = find_first_bit((void *)&mask, 32);
265 u32 val = ioread32(base + offset);
266
267 val &= mask; /* clear other bits */
268 val >>= shift;
269 return val;
270}
271
/**
 * dwc3_msm_write_reg - write a 32-bit controller register.
 *
 * @base - DWC3 base virtual address.
 * @offset - register offset.
 * @val - value to write.
 */
static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}
285
286/**
287 * Write register masked field with debug info.
288 *
289 * @base - DWC3 base virtual address.
290 * @offset - register offset.
291 * @mask - register bitmask.
292 * @val - value to write.
293 *
294 */
295static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
296 const u32 mask, u32 val)
297{
298 u32 shift = find_first_bit((void *)&mask, 32);
299 u32 tmp = ioread32(base + offset);
300
301 tmp &= ~mask; /* clear written bits */
302 val = tmp | (val << shift);
303 iowrite32(val, base + offset);
304}
305
306/**
307 * Write register and read back masked value to confirm it is written
308 *
309 * @base - DWC3 base virtual address.
310 * @offset - register offset.
311 * @mask - register bitmask specifying what should be updated
312 * @val - value to write.
313 *
314 */
315static inline void dwc3_msm_write_readback(void *base, u32 offset,
316 const u32 mask, u32 val)
317{
318 u32 write_val, tmp = ioread32(base + offset);
319
320 tmp &= ~mask; /* retain other bits */
321 write_val = tmp | val;
322
323 iowrite32(write_val, base + offset);
324
325 /* Read back to see if val was written */
326 tmp = ioread32(base + offset);
327 tmp &= mask; /* clear other bits */
328
329 if (tmp != val)
330 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
331 __func__, val, offset);
332}
333
334static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
335{
336 int i, num_ports;
337 u32 reg;
338
339 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
340 num_ports = HCS_MAX_PORTS(reg);
341
342 for (i = 0; i < num_ports; i++) {
343 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
344 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
345 return true;
346 }
347
348 return false;
349}
350
351static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
352{
353 u8 speed;
354
355 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
356 return !!(speed & DSTS_CONNECTSPD_SS);
357}
358
359static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
360{
361 if (mdwc->in_host_mode)
362 return dwc3_msm_is_host_superspeed(mdwc);
363
364 return dwc3_msm_is_dev_superspeed(mdwc);
365}
366
367#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
368/**
369 * Configure the DBM with the BAM's data fifo.
370 * This function is called by the USB BAM Driver
371 * upon initialization.
372 *
373 * @ep - pointer to usb endpoint.
374 * @addr - address of data fifo.
375 * @size - size of data fifo.
376 *
377 */
378int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
379 u32 size, u8 dst_pipe_idx)
380{
381 struct dwc3_ep *dep = to_dwc3_ep(ep);
382 struct dwc3 *dwc = dep->dwc;
383 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
384
385 dev_dbg(mdwc->dev, "%s\n", __func__);
386
387 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
388 dst_pipe_idx);
389}
390
391
/**
 * Completion hook substituted for DBM-mode requests.
 *
 * Restores the caller's original ->complete, undoes the DBM endpoint
 * configuration and invokes the original completion.  Runs from
 * dwc3_gadget_giveback() with dwc->lock already held.
 *
 * @ep - pointer to usb_ep instance.
 * @request - pointer to the completing usb_request.
 */
static void dwc3_msm_req_complete_func(struct usb_ep *ep,
			struct usb_request *request)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_msm_req_complete *req_complete = NULL;

	/* Find original request complete function and remove it from list */
	list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
		if (req_complete->req == request)
			break;
	}
	/*
	 * If the loop ran to completion, req_complete points at the list
	 * head container, so re-check ->req before trusting it.
	 */
	if (!req_complete || req_complete->req != request) {
		dev_err(dep->dwc->dev, "%s: could not find the request\n",
					__func__);
		return;
	}
	list_del(&req_complete->list_item);

	/*
	 * Release another one TRB to the pool since DBM queue took 2 TRBs
	 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
	 * released only one.
	 */
	dep->trb_dequeue++;

	/* Unconfigure dbm ep */
	dbm_ep_unconfig(mdwc->dbm, dep->number);

	/*
	 * If this is the last endpoint we unconfigured, then reset also
	 * the event buffers; unless unconfiguring the ep due to lpm,
	 * in which case the event buffer only gets reset during the
	 * block reset.
	 */
	if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
		!dbm_reset_ep_after_lpm(mdwc->dbm))
		dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);

	/*
	 * Call original complete function, notice that dwc->lock is already
	 * taken by the caller of this function (dwc3_gadget_giveback()).
	 */
	request->complete = req_complete->orig_complete;
	if (request->complete)
		request->complete(ep, request);

	kfree(req_complete);
}
452
453
454/**
455* Helper function
456*
457* Reset DBM endpoint.
458*
459* @mdwc - pointer to dwc3_msm instance.
460* @dep - pointer to dwc3_ep instance.
461*
462* @return int - 0 on success, negative on error.
463*/
464static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
465{
466 int ret;
467
468 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
469
470 /* Reset the dbm endpoint */
471 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
472 if (ret) {
473 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
474 __func__);
475 return ret;
476 }
477
478 /*
479 * The necessary delay between asserting and deasserting the dbm ep
480 * reset is based on the number of active endpoints. If there is more
481 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
482 * delay will suffice.
483 */
484 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
485 usleep_range(1000, 1200);
486 else
487 udelay(10);
488 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
489 if (ret) {
490 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
491 __func__);
492 return ret;
493 }
494
495 return 0;
496}
497
498/**
499* Reset the DBM endpoint which is linked to the given USB endpoint.
500*
501* @usb_ep - pointer to usb_ep instance.
502*
503* @return int - 0 on success, negative on error.
504*/
505
506int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
507{
508 struct dwc3_ep *dep = to_dwc3_ep(ep);
509 struct dwc3 *dwc = dep->dwc;
510 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
511
512 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
513}
514EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
515
516
517/**
518* Helper function.
519* See the header of the dwc3_msm_ep_queue function.
520*
521* @dwc3_ep - pointer to dwc3_ep instance.
522* @req - pointer to dwc3_request instance.
523*
524* @return int - 0 on success, negative on error.
525*/
526static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
527{
528 struct dwc3_trb *trb;
529 struct dwc3_trb *trb_link;
530 struct dwc3_gadget_ep_cmd_params params;
531 u32 cmd;
532 int ret = 0;
533
Mayank Rana83ad5822016-08-09 14:17:22 -0700534 /* We push the request to the dep->started_list list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700535 * this request is issued with start transfer. The request will be out
536 * from this list in 2 cases. The first is that the transfer will be
537 * completed (not if the transfer is endless using a circular TRBs with
538 * with link TRB). The second case is an option to do stop stransfer,
539 * this can be initiated by the function driver when calling dequeue.
540 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700541 req->started = true;
542 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700543
544 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana83ad5822016-08-09 14:17:22 -0700545 trb = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
546 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700547 memset(trb, 0, sizeof(*trb));
548
549 req->trb = trb;
550 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
551 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
552 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
553 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
554 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
555
556 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana83ad5822016-08-09 14:17:22 -0700557 trb_link = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
558 dep->trb_enqueue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700559 memset(trb_link, 0, sizeof(*trb_link));
560
561 trb_link->bpl = lower_32_bits(req->trb_dma);
562 trb_link->bph = DBM_TRB_BIT |
563 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
564 trb_link->size = 0;
565 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
566
567 /*
568 * Now start the transfer
569 */
570 memset(&params, 0, sizeof(params));
571 params.param0 = 0; /* TDAddr High */
572 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
573
574 /* DBM requires IOC to be set */
575 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700576 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700577 if (ret < 0) {
578 dev_dbg(dep->dwc->dev,
579 "%s: failed to send STARTTRANSFER command\n",
580 __func__);
581
582 list_del(&req->list);
583 return ret;
584 }
585 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700586 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700587
588 return ret;
589}
590
591/**
592* Queue a usb request to the DBM endpoint.
593* This function should be called after the endpoint
594* was enabled by the ep_enable.
595*
596* This function prepares special structure of TRBs which
597* is familiar with the DBM HW, so it will possible to use
598* this endpoint in DBM mode.
599*
600* The TRBs prepared by this function, is one normal TRB
601* which point to a fake buffer, followed by a link TRB
602* that points to the first TRB.
603*
604* The API of this function follow the regular API of
605* usb_ep_queue (see usb_ep_ops in include/linuk/usb/gadget.h).
606*
607* @usb_ep - pointer to usb_ep instance.
608* @request - pointer to usb_request instance.
609* @gfp_flags - possible flags.
610*
611* @return int - 0 on success, negative on error.
612*/
613static int dwc3_msm_ep_queue(struct usb_ep *ep,
614 struct usb_request *request, gfp_t gfp_flags)
615{
616 struct dwc3_request *req = to_dwc3_request(request);
617 struct dwc3_ep *dep = to_dwc3_ep(ep);
618 struct dwc3 *dwc = dep->dwc;
619 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
620 struct dwc3_msm_req_complete *req_complete;
621 unsigned long flags;
622 int ret = 0, size;
623 u8 bam_pipe;
624 bool producer;
625 bool disable_wb;
626 bool internal_mem;
627 bool ioc;
628 bool superspeed;
629
630 if (!(request->udc_priv & MSM_SPS_MODE)) {
631 /* Not SPS mode, call original queue */
632 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
633 __func__);
634
635 return (mdwc->original_ep_ops[dep->number])->queue(ep,
636 request,
637 gfp_flags);
638 }
639
640 /* HW restriction regarding TRB size (8KB) */
641 if (req->request.length < 0x2000) {
642 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
643 return -EINVAL;
644 }
645
646 /*
647 * Override req->complete function, but before doing that,
648 * store it's original pointer in the req_complete_list.
649 */
650 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
651 if (!req_complete)
652 return -ENOMEM;
653
654 req_complete->req = request;
655 req_complete->orig_complete = request->complete;
656 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
657 request->complete = dwc3_msm_req_complete_func;
658
659 /*
660 * Configure the DBM endpoint
661 */
662 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
663 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
664 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
665 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
666 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
667
668 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
669 disable_wb, internal_mem, ioc);
670 if (ret < 0) {
671 dev_err(mdwc->dev,
672 "error %d after calling dbm_ep_config\n", ret);
673 return ret;
674 }
675
676 dev_vdbg(dwc->dev, "%s: queing request %p to ep %s length %d\n",
677 __func__, request, ep->name, request->length);
678 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
679 dbm_event_buffer_config(mdwc->dbm,
680 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
681 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
682 DWC3_GEVNTSIZ_SIZE(size));
683
684 /*
685 * We must obtain the lock of the dwc3 core driver,
686 * including disabling interrupts, so we will be sure
687 * that we are the only ones that configure the HW device
688 * core and ensure that we queuing the request will finish
689 * as soon as possible so we will release back the lock.
690 */
691 spin_lock_irqsave(&dwc->lock, flags);
692 if (!dep->endpoint.desc) {
693 dev_err(mdwc->dev,
694 "%s: trying to queue request %p to disabled ep %s\n",
695 __func__, request, ep->name);
696 ret = -EPERM;
697 goto err;
698 }
699
700 if (dep->number == 0 || dep->number == 1) {
701 dev_err(mdwc->dev,
702 "%s: trying to queue dbm request %p to control ep %s\n",
703 __func__, request, ep->name);
704 ret = -EPERM;
705 goto err;
706 }
707
708
Mayank Rana83ad5822016-08-09 14:17:22 -0700709 if (dep->trb_dequeue != dep->trb_enqueue ||
710 !list_empty(&dep->pending_list)
711 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700712 dev_err(mdwc->dev,
713 "%s: trying to queue dbm request %p tp ep %s\n",
714 __func__, request, ep->name);
715 ret = -EPERM;
716 goto err;
717 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700718 dep->trb_dequeue = 0;
719 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700720 }
721
722 ret = __dwc3_msm_ep_queue(dep, req);
723 if (ret < 0) {
724 dev_err(mdwc->dev,
725 "error %d after calling __dwc3_msm_ep_queue\n", ret);
726 goto err;
727 }
728
729 spin_unlock_irqrestore(&dwc->lock, flags);
730 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
731 dbm_set_speed(mdwc->dbm, (u8)superspeed);
732
733 return 0;
734
735err:
736 spin_unlock_irqrestore(&dwc->lock, flags);
737 kfree(req_complete);
738 return ret;
739}
740
741/*
742* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
743*
744* @usb_ep - pointer to usb_ep instance.
745*
746* @return int - XferRscIndex
747*/
748static inline int gsi_get_xfer_index(struct usb_ep *ep)
749{
750 struct dwc3_ep *dep = to_dwc3_ep(ep);
751
752 return dep->resource_index;
753}
754
/*
 * Fills up the GSI channel information needed in call to IPA driver
 * for GSI channel creation.
 *
 * @ep - pointer to usb_ep instance.
 * @ch_info - output parameter with requested channel info
 */
static void gsi_get_channel_info(struct usb_ep *ep,
			struct gsi_channel_info *ch_info)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	int last_trb_index = 0;
	struct dwc3 *dwc = dep->dwc;
	struct usb_gsi_request *request = ch_info->ch_req;

	/* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
	ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
						DWC3_DEPCMD);
	ch_info->depcmd_hi_addr = 0;

	/* Physical address of TRB 0 is the transfer ring base */
	ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
							&dep->trb_pool[0]);
	/* Convert to multiple of 1KB */
	ch_info->const_buffer_size = request->buf_len/1024;

	/* IN direction */
	if (dep->direction) {
		/*
		 * Multiply by size of each TRB for xfer_ring_len in bytes.
		 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
		 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
		 */
		ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
		last_trb_index = 2 * request->num_bufs + 2;
	} else { /* OUT direction */
		/*
		 * Multiply by size of each TRB for xfer_ring_len in bytes.
		 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
		 * LINK TRB.
		 */
		ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
		last_trb_index = request->num_bufs + 1;
	}

	/* Store last 16 bits of LINK TRB address as per GSI hw requirement */
	ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
			&dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
	ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
			DWC3_GEVNTCOUNT(ep->ep_intr_num));
	ch_info->gevntcount_hi_addr = 0;

	dev_dbg(dwc->dev,
	"depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
		ch_info->depcmd_low_addr, ch_info->last_trb_addr,
		ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
}
811
812/*
813* Perform StartXfer on GSI EP. Stores XferRscIndex.
814*
815* @usb_ep - pointer to usb_ep instance.
816*
817* @return int - 0 on success
818*/
819static int gsi_startxfer_for_ep(struct usb_ep *ep)
820{
821 int ret;
822 struct dwc3_gadget_ep_cmd_params params;
823 u32 cmd;
824 struct dwc3_ep *dep = to_dwc3_ep(ep);
825 struct dwc3 *dwc = dep->dwc;
826
827 memset(&params, 0, sizeof(params));
828 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
829 params.param0 |= (ep->ep_intr_num << 16);
830 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
831 &dep->trb_pool[0]));
832 cmd = DWC3_DEPCMD_STARTTRANSFER;
833 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700834 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700835
836 if (ret < 0)
837 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700838 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700839 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
840 return ret;
841}
842
843/*
844* Store Ring Base and Doorbell Address for GSI EP
845* for GSI channel creation.
846*
847* @usb_ep - pointer to usb_ep instance.
848* @dbl_addr - Doorbell address obtained from IPA driver
849*/
850static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
851{
852 struct dwc3_ep *dep = to_dwc3_ep(ep);
853 struct dwc3 *dwc = dep->dwc;
854 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
855 int n = ep->ep_intr_num - 1;
856
857 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
858 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
859 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
860
861 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
862 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
863 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
864 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
865}
866
867/*
868* Rings Doorbell for IN GSI Channel
869*
870* @usb_ep - pointer to usb_ep instance.
871* @request - pointer to GSI request. This is used to pass in the
872* address of the GSI doorbell obtained from IPA driver
873*/
874static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
875{
876 void __iomem *gsi_dbl_address_lsb;
877 void __iomem *gsi_dbl_address_msb;
878 dma_addr_t offset;
879 u64 dbl_addr = *((u64 *)request->buf_base_addr);
880 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
881 u32 dbl_hi_addr = (dbl_addr >> 32);
882 u32 num_trbs = (request->num_bufs * 2 + 2);
883 struct dwc3_ep *dep = to_dwc3_ep(ep);
884 struct dwc3 *dwc = dep->dwc;
885 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
886
887 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
888 dbl_lo_addr, sizeof(u32));
889 if (!gsi_dbl_address_lsb)
890 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
891
892 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
893 dbl_hi_addr, sizeof(u32));
894 if (!gsi_dbl_address_msb)
895 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
896
897 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
898 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
899 &offset, gsi_dbl_address_lsb, dbl_lo_addr);
900
901 writel_relaxed(offset, gsi_dbl_address_lsb);
902 writel_relaxed(0, gsi_dbl_address_msb);
903}
904
905/*
906* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
907*
908* @usb_ep - pointer to usb_ep instance.
909* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
910*
911* @return int - 0 on success
912*/
913static int gsi_updatexfer_for_ep(struct usb_ep *ep,
914 struct usb_gsi_request *request)
915{
916 int i;
917 int ret;
918 u32 cmd;
919 int num_trbs = request->num_bufs + 1;
920 struct dwc3_trb *trb;
921 struct dwc3_gadget_ep_cmd_params params;
922 struct dwc3_ep *dep = to_dwc3_ep(ep);
923 struct dwc3 *dwc = dep->dwc;
924
925 for (i = 0; i < num_trbs - 1; i++) {
926 trb = &dep->trb_pool[i];
927 trb->ctrl |= DWC3_TRB_CTRL_HWO;
928 }
929
930 memset(&params, 0, sizeof(params));
931 cmd = DWC3_DEPCMD_UPDATETRANSFER;
932 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -0700933 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700934 dep->flags |= DWC3_EP_BUSY;
935 if (ret < 0)
936 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
937 return ret;
938}
939
940/*
941* Perform EndXfer on particular GSI EP.
942*
943* @usb_ep - pointer to usb_ep instance.
944*/
945static void gsi_endxfer_for_ep(struct usb_ep *ep)
946{
947 struct dwc3_ep *dep = to_dwc3_ep(ep);
948 struct dwc3 *dwc = dep->dwc;
949
950 dwc3_stop_active_transfer(dwc, dep->number, true);
951}
952
953/*
954* Allocates and configures TRBs for GSI EPs.
955*
956* @usb_ep - pointer to usb_ep instance.
957* @request - pointer to GSI request.
958*
959* @return int - 0 on success
960*/
961static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
962{
963 int i = 0;
964 dma_addr_t buffer_addr = req->dma;
965 struct dwc3_ep *dep = to_dwc3_ep(ep);
966 struct dwc3 *dwc = dep->dwc;
967 struct dwc3_trb *trb;
968 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
969 : (req->num_bufs + 1);
970
971 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
972 num_trbs * sizeof(struct dwc3_trb),
973 num_trbs * sizeof(struct dwc3_trb), 0);
974 if (!dep->trb_dma_pool) {
975 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
976 dep->name);
977 return -ENOMEM;
978 }
979
980 dep->num_trbs = num_trbs;
981
982 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
983 GFP_KERNEL, &dep->trb_pool_dma);
984 if (!dep->trb_pool) {
985 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
986 dep->name);
987 return -ENOMEM;
988 }
989
990 /* IN direction */
991 if (dep->direction) {
992 for (i = 0; i < num_trbs ; i++) {
993 trb = &dep->trb_pool[i];
994 memset(trb, 0, sizeof(*trb));
995 /* Set up first n+1 TRBs for ZLPs */
996 if (i < (req->num_bufs + 1)) {
997 trb->bpl = 0;
998 trb->bph = 0;
999 trb->size = 0;
1000 trb->ctrl = DWC3_TRBCTL_NORMAL
1001 | DWC3_TRB_CTRL_IOC;
1002 continue;
1003 }
1004
1005 /* Setup n TRBs pointing to valid buffers */
1006 trb->bpl = lower_32_bits(buffer_addr);
1007 trb->bph = 0;
1008 trb->size = 0;
1009 trb->ctrl = DWC3_TRBCTL_NORMAL
1010 | DWC3_TRB_CTRL_IOC;
1011 buffer_addr += req->buf_len;
1012
1013 /* Set up the Link TRB at the end */
1014 if (i == (num_trbs - 1)) {
1015 trb->bpl = dwc3_trb_dma_offset(dep,
1016 &dep->trb_pool[0]);
1017 trb->bph = (1 << 23) | (1 << 21)
1018 | (ep->ep_intr_num << 16);
1019 trb->size = 0;
1020 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1021 | DWC3_TRB_CTRL_HWO;
1022 }
1023 }
1024 } else { /* OUT direction */
1025
1026 for (i = 0; i < num_trbs ; i++) {
1027
1028 trb = &dep->trb_pool[i];
1029 memset(trb, 0, sizeof(*trb));
1030 trb->bpl = lower_32_bits(buffer_addr);
1031 trb->bph = 0;
1032 trb->size = req->buf_len;
1033 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
1034 | DWC3_TRB_CTRL_CSP
1035 | DWC3_TRB_CTRL_ISP_IMI;
1036 buffer_addr += req->buf_len;
1037
1038 /* Set up the Link TRB at the end */
1039 if (i == (num_trbs - 1)) {
1040 trb->bpl = dwc3_trb_dma_offset(dep,
1041 &dep->trb_pool[0]);
1042 trb->bph = (1 << 23) | (1 << 21)
1043 | (ep->ep_intr_num << 16);
1044 trb->size = 0;
1045 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1046 | DWC3_TRB_CTRL_HWO;
1047 }
1048 }
1049 }
1050 return 0;
1051}
1052
1053/*
1054* Frees TRBs for GSI EPs.
1055*
1056* @usb_ep - pointer to usb_ep instance.
1057*
1058*/
1059static void gsi_free_trbs(struct usb_ep *ep)
1060{
1061 struct dwc3_ep *dep = to_dwc3_ep(ep);
1062
1063 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1064 return;
1065
1066 /* Free TRBs and TRB pool for EP */
1067 if (dep->trb_dma_pool) {
1068 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1069 dep->trb_pool_dma);
1070 dma_pool_destroy(dep->trb_dma_pool);
1071 dep->trb_pool = NULL;
1072 dep->trb_pool_dma = 0;
1073 dep->trb_dma_pool = NULL;
1074 }
1075}
/*
 * gsi_configure_ep - configure a GSI endpoint in the controller.
 *
 * Issues DEPCFG (and, on first enable, DEPXFERCFG) commands so that the
 * endpoint's events are routed to the GSI interrupter instead of the
 * core's default one.
 *
 * @usb_ep - pointer to usb_ep instance.
 * @request - pointer to GSI request.
 *
 * Caller holds dwc->lock (taken in dwc3_msm_gsi_ep_op()).
 */
static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	struct dwc3_gadget_ep_cmd_params params;
	const struct usb_endpoint_descriptor *desc = ep->desc;
	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
	u32 reg;

	memset(&params, 0x00, sizeof(params));

	/* Configure GSI EP: transfer type and max packet size from desc */
	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
					| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	/* Set EP number */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/* Set interrupter number for GSI endpoints */
	params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);

	/* Enable XferInProgress and XferComplete Interrupts */
	params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
	params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
	params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	/* Remove FIFO Number for GSI EP*/
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	params.param0 |= DWC3_DEPCFG_ACTION_INIT;

	dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
	params.param0, params.param1, params.param2, dep->name);

	dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);

	/* Set XferRsc Index for GSI EP — only on the first (enabling) pass */
	if (!(dep->flags & DWC3_EP_ENABLED)) {
		memset(&params, 0x00, sizeof(params));
		params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
		dwc3_send_gadget_ep_cmd(dep,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);

		/* Mark the EP enabled and ungate its events in DALEPENA */
		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;
		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
	}

}
1153
/*
 * gsi_enable - enable the USB wrapper for GSI.
 *
 * Ungates the GSI clock, pulses the restart-doorbell-pointer bit (write
 * 1 then 0) so GSI re-reads its doorbell pointers, then sets the global
 * GSI enable bit last.
 *
 * @usb_ep - pointer to usb_ep instance.
 */
static void gsi_enable(struct usb_ep *ep)
{
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/* Ungate the GSI clock */
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
	/* Pulse the restart-doorbell-pointer bit */
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
	dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
	/* Turn the GSI wrapper on last */
	dwc3_msm_write_reg_field(mdwc->base,
			GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
}
1175
1176/*
1177* Block or allow doorbell towards GSI
1178*
1179* @usb_ep - pointer to usb_ep instance.
1180* @request - pointer to GSI request. In this case num_bufs is used as a bool
1181* to set or clear the doorbell bit
1182*/
1183static void gsi_set_clear_dbell(struct usb_ep *ep,
1184 bool block_db)
1185{
1186
1187 struct dwc3_ep *dep = to_dwc3_ep(ep);
1188 struct dwc3 *dwc = dep->dwc;
1189 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1190
1191 dwc3_msm_write_reg_field(mdwc->base,
1192 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1193}
1194
/*
 * gsi_check_ready_to_suspend - perform necessary checks before stopping
 * GSI channels.
 *
 * Busy-waits (up to 1500 register reads, no explicit delay between
 * polls) for the GSI write-control state machine to go idle.  Unless a
 * function suspend is being handled, additionally requires the USB link
 * to already be in U3.
 *
 * @usb_ep - pointer to usb_ep instance to access DWC3 regs
 * @f_suspend - true when handling a USB Function Suspend.
 *
 * Returns true when the GSI channels may be suspended, false otherwise.
 */
static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
{
	u32 timeout = 1500;
	u32 reg = 0;
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);

	/* Wait for the GSI write-control state machine to become idle */
	while (dwc3_msm_read_reg_field(mdwc->base,
		GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
		if (!timeout--) {
			dev_err(mdwc->dev,
			"Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
			return false;
		}
	}
	/* Check for U3 only if we are not handling Function Suspend */
	if (!f_suspend) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
		if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
			dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
			return false;
		}
	}

	return true;
}
1227
1228
1229/**
1230* Performs GSI operations or GSI EP related operations.
1231*
1232* @usb_ep - pointer to usb_ep instance.
1233* @op_data - pointer to opcode related data.
1234* @op - GSI related or GSI EP related op code.
1235*
1236* @return int - 0 on success, negative on error.
1237* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1238*/
1239static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1240 void *op_data, enum gsi_ep_op op)
1241{
1242 u32 ret = 0;
1243 struct dwc3_ep *dep = to_dwc3_ep(ep);
1244 struct dwc3 *dwc = dep->dwc;
1245 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1246 struct usb_gsi_request *request;
1247 struct gsi_channel_info *ch_info;
1248 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001249 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001250
1251 switch (op) {
1252 case GSI_EP_OP_PREPARE_TRBS:
1253 request = (struct usb_gsi_request *)op_data;
1254 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1255 ret = gsi_prepare_trbs(ep, request);
1256 break;
1257 case GSI_EP_OP_FREE_TRBS:
1258 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1259 gsi_free_trbs(ep);
1260 break;
1261 case GSI_EP_OP_CONFIG:
1262 request = (struct usb_gsi_request *)op_data;
1263 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001264 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001265 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001266 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001267 break;
1268 case GSI_EP_OP_STARTXFER:
1269 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001270 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001271 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001272 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001273 break;
1274 case GSI_EP_OP_GET_XFER_IDX:
1275 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1276 ret = gsi_get_xfer_index(ep);
1277 break;
1278 case GSI_EP_OP_STORE_DBL_INFO:
1279 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1280 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1281 break;
1282 case GSI_EP_OP_ENABLE_GSI:
1283 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1284 gsi_enable(ep);
1285 break;
1286 case GSI_EP_OP_GET_CH_INFO:
1287 ch_info = (struct gsi_channel_info *)op_data;
1288 gsi_get_channel_info(ep, ch_info);
1289 break;
1290 case GSI_EP_OP_RING_IN_DB:
1291 request = (struct usb_gsi_request *)op_data;
1292 dev_dbg(mdwc->dev, "RING IN EP DB\n");
1293 gsi_ring_in_db(ep, request);
1294 break;
1295 case GSI_EP_OP_UPDATEXFER:
1296 request = (struct usb_gsi_request *)op_data;
1297 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001298 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001299 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001300 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001301 break;
1302 case GSI_EP_OP_ENDXFER:
1303 request = (struct usb_gsi_request *)op_data;
1304 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001305 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001306 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001307 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001308 break;
1309 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1310 block_db = *((bool *)op_data);
1311 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1312 block_db);
1313 gsi_set_clear_dbell(ep, block_db);
1314 break;
1315 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1316 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1317 f_suspend = *((bool *)op_data);
1318 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1319 break;
1320 case GSI_EP_OP_DISABLE:
1321 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1322 ret = ep->ops->disable(ep);
1323 break;
1324 default:
1325 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1326 }
1327
1328 return ret;
1329}
1330
1331/**
1332 * Configure MSM endpoint.
1333 * This function do specific configurations
1334 * to an endpoint which need specific implementaion
1335 * in the MSM architecture.
1336 *
1337 * This function should be called by usb function/class
1338 * layer which need a support from the specific MSM HW
1339 * which wrap the USB3 core. (like GSI or DBM specific endpoints)
1340 *
1341 * @ep - a pointer to some usb_ep instance
1342 *
1343 * @return int - 0 on success, negetive on error.
1344 */
1345int msm_ep_config(struct usb_ep *ep)
1346{
1347 struct dwc3_ep *dep = to_dwc3_ep(ep);
1348 struct dwc3 *dwc = dep->dwc;
1349 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1350 struct usb_ep_ops *new_ep_ops;
1351
1352
1353 /* Save original ep ops for future restore*/
1354 if (mdwc->original_ep_ops[dep->number]) {
1355 dev_err(mdwc->dev,
1356 "ep [%s,%d] already configured as msm endpoint\n",
1357 ep->name, dep->number);
1358 return -EPERM;
1359 }
1360 mdwc->original_ep_ops[dep->number] = ep->ops;
1361
1362 /* Set new usb ops as we like */
1363 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1364 if (!new_ep_ops)
1365 return -ENOMEM;
1366
1367 (*new_ep_ops) = (*ep->ops);
1368 new_ep_ops->queue = dwc3_msm_ep_queue;
1369 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1370 ep->ops = new_ep_ops;
1371
1372 /*
1373 * Do HERE more usb endpoint configurations
1374 * which are specific to MSM.
1375 */
1376
1377 return 0;
1378}
1379EXPORT_SYMBOL(msm_ep_config);
1380
1381/**
1382 * Un-configure MSM endpoint.
1383 * Tear down configurations done in the
1384 * dwc3_msm_ep_config function.
1385 *
1386 * @ep - a pointer to some usb_ep instance
1387 *
1388 * @return int - 0 on success, negative on error.
1389 */
1390int msm_ep_unconfig(struct usb_ep *ep)
1391{
1392 struct dwc3_ep *dep = to_dwc3_ep(ep);
1393 struct dwc3 *dwc = dep->dwc;
1394 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1395 struct usb_ep_ops *old_ep_ops;
1396
1397 /* Restore original ep ops */
1398 if (!mdwc->original_ep_ops[dep->number]) {
1399 dev_err(mdwc->dev,
1400 "ep [%s,%d] was not configured as msm endpoint\n",
1401 ep->name, dep->number);
1402 return -EINVAL;
1403 }
1404 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1405 ep->ops = mdwc->original_ep_ops[dep->number];
1406 mdwc->original_ep_ops[dep->number] = NULL;
1407 kfree(old_ep_ops);
1408
1409 /*
1410 * Do HERE more usb endpoint un-configurations
1411 * which are specific to MSM.
1412 */
1413
1414 return 0;
1415}
1416EXPORT_SYMBOL(msm_ep_unconfig);
1417#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1418
1419static void dwc3_resume_work(struct work_struct *w);
1420
/*
 * dwc3_restart_usb_work - tear down and re-establish the USB session.
 *
 * Scheduled after a controller error event or an explicit restart
 * request.  Simulates a cable disconnect via dwc3_resume_work(), waits
 * (up to ~1 s) for the core to runtime-suspend, then simulates a
 * reconnect if VBUS is still present.
 */
static void dwc3_restart_usb_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
						restart_usb_work);
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	unsigned int timeout = 50;

	dev_dbg(mdwc->dev, "%s\n", __func__);

	if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
		dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
		return;
	}

	/* guard against concurrent VBUS handling */
	mdwc->in_restart = true;

	if (!mdwc->vbus_active) {
		dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
		dwc->err_evt_seen = false;
		mdwc->in_restart = false;
		return;
	}

	/* Reset active USB connection */
	dwc3_resume_work(&mdwc->resume_work);

	/* Make sure disconnect is processed before sending connect */
	while (--timeout && !pm_runtime_suspended(mdwc->dev))
		msleep(20);

	if (!timeout) {
		dev_dbg(mdwc->dev,
			"Not in LPM after disconnect, forcing suspend...\n");
		pm_runtime_suspend(mdwc->dev);
	}

	mdwc->in_restart = false;
	/* Force reconnect only if cable is still connected */
	if (mdwc->vbus_active)
		dwc3_resume_work(&mdwc->resume_work);

	dwc->err_evt_seen = false;
	flush_delayed_work(&mdwc->sm_work);
}
1466
1467/*
1468 * Check whether the DWC3 requires resetting the ep
1469 * after going to Low Power Mode (lpm)
1470 */
1471bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1472{
1473 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1474 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1475
1476 return dbm_reset_ep_after_lpm(mdwc->dbm);
1477}
1478EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1479
1480/*
1481 * Config Global Distributed Switch Controller (GDSC)
1482 * to support controller power collapse
1483 */
1484static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1485{
1486 int ret;
1487
1488 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1489 return -EPERM;
1490
1491 if (on) {
1492 ret = regulator_enable(mdwc->dwc3_gdsc);
1493 if (ret) {
1494 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1495 return ret;
1496 }
1497 } else {
1498 ret = regulator_disable(mdwc->dwc3_gdsc);
1499 if (ret) {
1500 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1501 return ret;
1502 }
1503 }
1504
1505 return ret;
1506}
1507
/*
 * dwc3_msm_link_clk_reset - assert/deassert the dwc3 core block reset.
 * @mdwc: driver context.
 * @assert: true to assert the reset, false to deassert.
 *
 * Clocks are gated before assert and re-enabled after deassert (the
 * hardware reset is applied with clocks off).  The power event IRQ is
 * disabled across the sequence and re-enabled on deassert.
 *
 * Return: 0 on success, reset-controller error code otherwise.
 */
static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
{
	int ret = 0;

	if (assert) {
		disable_irq(mdwc->pwr_event_irq);
		/* Using asynchronous block reset to the hardware */
		dev_dbg(mdwc->dev, "block_reset ASSERT\n");
		clk_disable_unprepare(mdwc->utmi_clk);
		clk_disable_unprepare(mdwc->sleep_clk);
		clk_disable_unprepare(mdwc->core_clk);
		clk_disable_unprepare(mdwc->iface_clk);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
	} else {
		dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
		/* let the deassert settle before restoring clocks */
		ndelay(200);
		clk_prepare_enable(mdwc->iface_clk);
		clk_prepare_enable(mdwc->core_clk);
		clk_prepare_enable(mdwc->sleep_clk);
		clk_prepare_enable(mdwc->utmi_clk);
		enable_irq(mdwc->pwr_event_irq);
	}

	return ret;
}
1538
/*
 * dwc3_msm_update_ref_clk - program GUCTL/GFLADJ for the UTMI clock rate.
 *
 * Sets the reference clock period in GUCTL and, on cores at or above
 * revision 2.50a, the frame length adjustment fields in GFLADJ, based
 * on mdwc->utmi_clk_rate.  Values come from the SNPS databook
 * (Table 6-55); unsupported rates are only warned about.
 */
static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
{
	u32 guctl, gfladj = 0;

	guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
	guctl &= ~DWC3_GUCTL_REFCLKPER;

	/* GFLADJ register is used starting with revision 2.50a */
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
		gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
		gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
	}

	/* Refer to SNPS Databook Table 6-55 for calculations used */
	switch (mdwc->utmi_clk_rate) {
	case 19200000:
		guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	case 24000000:
		guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
		gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
		gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
		break;
	default:
		dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
				mdwc->utmi_clk_rate);
		break;
	}

	dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
	/* gfladj stays 0 (and is skipped) on cores older than 2.50a */
	if (gfladj)
		dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
}
1580
/*
 * Initialize QSCRATCH registers for HSPHY and SSPHY operation.
 * Called on controller reset (see DWC3_CONTROLLER_RESET_EVENT handling
 * in dwc3_msm_notify_event()).
 */
static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
{
	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
		/* On older cores set XHCI_REV bit to specify revision 1.0 */
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
					 BIT(2), 1);

	/*
	 * Enable master clock for RAMs to allow BAM to access RAMs when
	 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
	 * are seen where RAM clocks get turned OFF in SS mode
	 */
	dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
		dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);

}
1598
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001599static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1600{
1601 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1602 vbus_draw_work);
1603 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1604
1605 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1606}
1607
/*
 * dwc3_msm_notify_event - handle notifications raised by the dwc3 core.
 * @dwc: core instance the event originated from.
 * @event: DWC3_CONTROLLER_* notification code.
 *
 * Runs MSM glue for core events: error recovery (soft reset + restart
 * work), QSCRATCH re-init after reset, post-reset clock fixups, LPM
 * bookkeeping on connect-done, bus-suspend forwarding, VBUS draw and
 * session restart requests.  Ignored on cores older than 2.30a.
 */
static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
{
	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
	u32 reg;

	if (dwc->revision < DWC3_REVISION_230A)
		return;

	switch (event) {
	case DWC3_CONTROLLER_ERROR_EVENT:
		dev_info(mdwc->dev,
			"DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
			dwc->irq_cnt);

		dwc3_gadget_disable_irq(dwc);

		/* prevent core from generating interrupts until recovery */
		reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
		reg |= DWC3_GCTL_CORESOFTRESET;
		dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);

		/* restart USB which performs full reset and reconnect */
		schedule_work(&mdwc->restart_usb_work);
		break;
	case DWC3_CONTROLLER_RESET_EVENT:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
		/* HS & SSPHYs get reset as part of core soft reset */
		dwc3_msm_qscratch_reg_init(mdwc);
		break;
	case DWC3_CONTROLLER_POST_RESET_EVENT:
		dev_dbg(mdwc->dev,
				"DWC3_CONTROLLER_POST_RESET_EVENT received\n");

		/*
		 * Below sequence is used when controller is working without
		 * having ssphy and only USB high speed is supported.
		 */
		if (dwc->maximum_speed == USB_SPEED_HIGH) {
			/* gate the PIPE/UTMI clock before switching its mux */
			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
				dwc3_msm_read_reg(mdwc->base,
				QSCRATCH_GENERAL_CFG)
				| PIPE_UTMI_CLK_DIS);

			usleep_range(2, 5);


			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
				dwc3_msm_read_reg(mdwc->base,
				QSCRATCH_GENERAL_CFG)
				| PIPE_UTMI_CLK_SEL
				| PIPE3_PHYSTATUS_SW);

			usleep_range(2, 5);

			/* ungate the clock once the mux has settled */
			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
				dwc3_msm_read_reg(mdwc->base,
				QSCRATCH_GENERAL_CFG)
				& ~PIPE_UTMI_CLK_DIS);
		}

		dwc3_msm_update_ref_clk(mdwc);
		dwc->tx_fifo_size = mdwc->tx_fifo_size;
		break;
	case DWC3_CONTROLLER_CONNDONE_EVENT:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
		/*
		 * Add power event if the dbm indicates coming out of L1 by
		 * interrupt
		 */
		if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
			dwc3_msm_write_reg_field(mdwc->base,
					PWR_EVNT_IRQ_MASK_REG,
					PWR_EVNT_LPM_OUT_L1_MASK, 1);

		atomic_set(&dwc->in_lpm, 0);
		break;
	case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
		if (dwc->enable_bus_suspend) {
			mdwc->suspend = dwc->b_suspend;
			queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
		}
		break;
	case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
		schedule_work(&mdwc->vbus_draw_work);
		break;
	case DWC3_CONTROLLER_RESTART_USB_SESSION:
		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
		schedule_work(&mdwc->restart_usb_work);
		break;
	default:
		dev_dbg(mdwc->dev, "unknown dwc3 event\n");
		break;
	}
}
1704
/*
 * dwc3_msm_block_reset - reset the controller block and re-enable DBM.
 * @mdwc: driver context.
 * @core_reset: when true, also cycle the core block reset (clocks are
 *              gated/ungated inside dwc3_msm_link_clk_reset()) before
 *              touching the DBM.
 *
 * Silently bails out if the core reset fails; the DBM (if present) is
 * soft-reset and re-enabled afterwards.
 */
static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
{
	int ret = 0;

	if (core_reset) {
		ret = dwc3_msm_link_clk_reset(mdwc, 1);
		if (ret)
			return;

		usleep_range(1000, 1200);
		ret = dwc3_msm_link_clk_reset(mdwc, 0);
		if (ret)
			return;

		usleep_range(10000, 12000);
	}

	if (mdwc->dbm) {
		/* Reset the DBM */
		dbm_soft_reset(mdwc->dbm, 1);
		usleep_range(1000, 1200);
		dbm_soft_reset(mdwc->dbm, 0);

		/*enable DBM*/
		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
			DBM_EN_MASK, 0x1);
		dbm_enable(mdwc->dbm);
	}
}
1734
/*
 * dwc3_msm_power_collapse_por - redo power-on-reset programming after
 * the controller returns from a power collapse.
 *
 * Re-programs the AHB2PHY bridge for one-wait-state accesses (when an
 * AHB2PHY region is mapped), runs the dwc3 core init sequence (plus a
 * one-time pre-init on first call) and restores the event buffers.
 */
static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	u32 val;

	/* Configure AHB2PHY for one wait state read/write */
	if (mdwc->ahb2phy_base) {
		clk_prepare_enable(mdwc->cfg_ahb_clk);
		val = readl_relaxed(mdwc->ahb2phy_base +
				PERIPH_SS_AHB2PHY_TOP_CFG);
		if (val != ONE_READ_WRITE_WAIT) {
			writel_relaxed(ONE_READ_WRITE_WAIT,
				mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
			/* complete above write before configuring USB PHY. */
			mb();
		}
		clk_disable_unprepare(mdwc->cfg_ahb_clk);
	}

	/* one-time core pre-init on the very first power-up */
	if (!mdwc->init) {
		dwc3_core_pre_init(dwc);
		mdwc->init = true;
	}

	dwc3_core_init(dwc);
	/* Re-configure event buffers */
	dwc3_event_buffers_setup(dwc);
}
1763
/*
 * dwc3_msm_prepare_suspend - park the HS PHY in L2 ahead of suspend.
 *
 * Aborts with -EBUSY when a SuperSpeed session is active (host mode or
 * VBUS present, not restarting) but the link is not yet in P3.
 * Otherwise sets the PHY sleep/suspend enables and polls (up to 5 ms)
 * for the L2 power event; failure to reach L2 is only logged.
 *
 * Return: 0 on success, -EBUSY when not in P3.
 */
static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
{
	unsigned long timeout;
	u32 reg = 0;

	if ((mdwc->in_host_mode || mdwc->vbus_active)
			&& dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
		if (!atomic_read(&mdwc->in_p3)) {
			dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
			return -EBUSY;
		}
	}

	/* Clear previous L2 events */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);

	/* Prepare HSPHY for suspend */
	reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);

	/* Wait for PHY to go into L2 */
	timeout = jiffies + msecs_to_jiffies(5);
	while (!time_after(jiffies, timeout)) {
		reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
		if (reg & PWR_EVNT_LPM_IN_L2_MASK)
			break;
	}
	if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
		dev_err(mdwc->dev, "could not transition HS PHY to L2\n");

	/* Clear L2 event bit */
	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
		PWR_EVNT_LPM_IN_L2_MASK);

	return 0;
}
1802
1803static void dwc3_msm_bus_vote_w(struct work_struct *w)
1804{
1805 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
1806 int ret;
1807
1808 ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
1809 mdwc->bus_vote);
1810 if (ret)
1811 dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
1812}
1813
1814static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
1815{
1816 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1817 int i, num_ports;
1818 u32 reg;
1819
1820 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
1821 if (mdwc->in_host_mode) {
1822 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
1823 num_ports = HCS_MAX_PORTS(reg);
1824 for (i = 0; i < num_ports; i++) {
1825 reg = dwc3_msm_read_reg(mdwc->base,
1826 USB3_PORTSC + i*0x10);
1827 if (reg & PORT_PE) {
1828 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
1829 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1830 else if (DEV_LOWSPEED(reg))
1831 mdwc->hs_phy->flags |= PHY_LS_MODE;
1832 }
1833 }
1834 } else {
1835 if (dwc->gadget.speed == USB_SPEED_HIGH ||
1836 dwc->gadget.speed == USB_SPEED_FULL)
1837 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1838 else if (dwc->gadget.speed == USB_SPEED_LOW)
1839 mdwc->hs_phy->flags |= PHY_LS_MODE;
1840 }
1841}
1842
1843
/*
 * dwc3_msm_suspend - put the controller, PHYs and clocks into low power.
 *
 * Sequence: bail out on pending device events or state-machine races,
 * park the HS PHY in L2 (dwc3_msm_prepare_suspend()), disable IRQs,
 * suspend HS (and, when possible, SS) PHYs, gate clocks in dependency
 * order, optionally power-collapse the controller, drop the bus vote
 * and finally arm the PHY wakeup interrupts.
 *
 * Return: 0 on success, -EBUSY when suspend must be aborted.
 */
static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
{
	int ret;
	bool can_suspend_ssphy;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	struct dwc3_event_buffer *evt;

	if (atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
		return 0;
	}

	/* In device mode, unhandled events in the event buffer block LPM */
	if (!mdwc->in_host_mode) {
		evt = dwc->ev_buf;
		if ((evt->flags & DWC3_EVENT_PENDING)) {
			dev_dbg(mdwc->dev,
				"%s: %d device events pending, abort suspend\n",
				__func__, evt->count / 4);
			return -EBUSY;
		}
	}

	if (!mdwc->vbus_active && dwc->is_drd &&
		mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
		/*
		 * In some cases, the pm_runtime_suspend may be called by
		 * usb_bam when there is pending lpm flag. However, if this is
		 * done when cable was disconnected and otg state has not
		 * yet changed to IDLE, then it means OTG state machine
		 * is running and we race against it. So cancel LPM for now,
		 * and OTG state machine will go for LPM later, after completing
		 * transition to IDLE state.
		 */
		dev_dbg(mdwc->dev,
			"%s: cable disconnected while not in idle otg state\n",
			__func__);
		return -EBUSY;
	}

	/*
	 * Check if device is not in CONFIGURED state
	 * then check controller state of L2 and break
	 * LPM sequence. Check this for device bus suspend case.
	 */
	if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
		(dwc->gadget.state != USB_STATE_CONFIGURED)) {
		pr_err("%s(): Trying to go in LPM with state:%d\n",
					__func__, dwc->gadget.state);
		pr_err("%s(): LPM is not performed.\n", __func__);
		return -EBUSY;
	}

	ret = dwc3_msm_prepare_suspend(mdwc);
	if (ret)
		return ret;

	/* Initialize variables here */
	can_suspend_ssphy = !(mdwc->in_host_mode &&
				dwc3_msm_is_host_superspeed(mdwc));

	/* Disable core irq */
	if (dwc->irq)
		disable_irq(dwc->irq);

	/* disable power event irq, hs and ss phy irq is used as wake up src */
	disable_irq(mdwc->pwr_event_irq);

	dwc3_set_phy_speed_flags(mdwc);
	/* Suspend HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 1);

	/* Suspend SS PHY */
	if (dwc->maximum_speed == USB_SPEED_SUPER && can_suspend_ssphy) {
		/* indicate phy about SS mode */
		if (dwc3_msm_is_superspeed(mdwc))
			mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
		usb_phy_set_suspend(mdwc->ss_phy, 1);
		mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
	}

	/* make sure above writes are completed before turning off clocks */
	wmb();

	/* Disable clocks */
	if (mdwc->bus_aggr_clk)
		clk_disable_unprepare(mdwc->bus_aggr_clk);
	clk_disable_unprepare(mdwc->utmi_clk);

	/* Memory core: OFF, Memory periphery: OFF */
	if (!mdwc->in_host_mode && !mdwc->vbus_active) {
		clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
		clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
	}

	/* drop the core clock to the XO rate before gating it */
	clk_set_rate(mdwc->core_clk, 19200000);
	clk_disable_unprepare(mdwc->core_clk);
	if (mdwc->noc_aggr_clk)
		clk_disable_unprepare(mdwc->noc_aggr_clk);
	/*
	 * Disable iface_clk only after core_clk as core_clk has FSM
	 * depedency on iface_clk. Hence iface_clk should be turned off
	 * after core_clk is turned off.
	 */
	clk_disable_unprepare(mdwc->iface_clk);
	/* USB PHY no more requires TCXO */
	clk_disable_unprepare(mdwc->xo_clk);

	/* Perform controller power collapse */
	if (!mdwc->in_host_mode && !mdwc->vbus_active) {
		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 0);
		clk_disable_unprepare(mdwc->sleep_clk);
	}

	/* Remove bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 0;
		schedule_work(&mdwc->bus_vote_w);
	}

	/*
	 * release wakeup source with timeout to defer system suspend to
	 * handle case where on USB cable disconnect, SUSPEND and DISCONNECT
	 * event is received.
	 */
	if (mdwc->lpm_to_suspend_delay) {
		dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
					mdwc->lpm_to_suspend_delay);
		pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
	} else {
		pm_relax(mdwc->dev);
	}

	atomic_set(&dwc->in_lpm, 1);

	/*
	 * with DCP or during cable disconnect, we dont require wakeup
	 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
	 * case of host bus suspend and device bus suspend.
	 */
	if (mdwc->vbus_active || mdwc->in_host_mode) {
		enable_irq_wake(mdwc->hs_phy_irq);
		enable_irq(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			enable_irq_wake(mdwc->ss_phy_irq);
			enable_irq(mdwc->ss_phy_irq);
		}
		/*
		 * Enable power event irq during bus suspend in host mode for
		 * mapping MPM pin for DP so that wakeup can happen in system
		 * suspend.
		 */
		if (mdwc->in_host_mode) {
			enable_irq(mdwc->pwr_event_irq);
			enable_irq_wake(mdwc->pwr_event_irq);
		}
		mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 in low power mode\n");
	return 0;
}
2007
/*
 * dwc3_msm_resume - bring the DWC3 controller out of low power mode.
 *
 * Reverses dwc3_msm_suspend(): votes for bus bandwidth and TCXO, restores
 * power/clocks (undoing power collapse if it was entered), wakes the SS and
 * HS PHYs, re-initializes the controller after power collapse, and re-arms
 * the core/power-event IRQs. The ordering of steps below mirrors the
 * suspend path in reverse and must not be rearranged casually: iface_clk
 * before core_clk (FSM dependency), PHYs after clocks, POR re-init after
 * PHY resume.
 *
 * Returns 0 (also when the controller was already resumed).
 */
static int dwc3_msm_resume(struct dwc3_msm *mdwc)
{
	int ret;
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);

	/* Nothing to do if a prior resume already ran */
	if (!atomic_read(&dwc->in_lpm)) {
		dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
		return 0;
	}

	pm_stay_awake(mdwc->dev);

	/* Enable bus voting */
	if (mdwc->bus_perf_client) {
		mdwc->bus_vote = 1;
		schedule_work(&mdwc->bus_vote_w);
	}

	/* Vote for TCXO while waking up USB HSPHY */
	ret = clk_prepare_enable(mdwc->xo_clk);
	if (ret)
		dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
					__func__, ret);

	/* Restore controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
		dwc3_msm_config_gdsc(mdwc, 1);
		ret = reset_control_assert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset assert failed\n",
					__func__);
		/* HW requires a short delay for reset to take place properly */
		usleep_range(1000, 1200);
		ret = reset_control_deassert(mdwc->core_reset);
		if (ret)
			dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
					__func__);
		clk_prepare_enable(mdwc->sleep_clk);
	}

	/*
	 * Enable clocks
	 * Turned ON iface_clk before core_clk due to FSM depedency.
	 */
	clk_prepare_enable(mdwc->iface_clk);
	if (mdwc->noc_aggr_clk)
		clk_prepare_enable(mdwc->noc_aggr_clk);
	clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
	clk_prepare_enable(mdwc->core_clk);

	/* set Memory core: ON, Memory periphery: ON */
	clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
	clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);

	clk_prepare_enable(mdwc->utmi_clk);
	if (mdwc->bus_aggr_clk)
		clk_prepare_enable(mdwc->bus_aggr_clk);

	/* Resume SS PHY (only if it was suspended on the way down) */
	if (dwc->maximum_speed == USB_SPEED_SUPER &&
			mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
		/* restore Type-C lane selection before un-suspending */
		mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
		if (mdwc->typec_orientation == ORIENTATION_CC1)
			mdwc->ss_phy->flags |= PHY_LANE_A;
		if (mdwc->typec_orientation == ORIENTATION_CC2)
			mdwc->ss_phy->flags |= PHY_LANE_B;
		usb_phy_set_suspend(mdwc->ss_phy, 0);
		mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
		mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
	}

	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
	/* Resume HS PHY */
	usb_phy_set_suspend(mdwc->hs_phy, 0);

	/* Recover from controller power collapse */
	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
		u32 tmp;

		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);

		dwc3_msm_power_collapse_por(mdwc);

		/* Get initial P3 status and enable IN_P3 event */
		tmp = dwc3_msm_read_reg_field(mdwc->base,
			DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
		atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
		dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
					PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);

		mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
	}

	atomic_set(&dwc->in_lpm, 0);

	/* Disable HSPHY auto suspend */
	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
		dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
				~(DWC3_GUSB2PHYCFG_ENBLSLPM |
					DWC3_GUSB2PHYCFG_SUSPHY));

	/* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
	if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
		disable_irq_wake(mdwc->hs_phy_irq);
		disable_irq_nosync(mdwc->hs_phy_irq);
		if (mdwc->ss_phy_irq) {
			disable_irq_wake(mdwc->ss_phy_irq);
			disable_irq_nosync(mdwc->ss_phy_irq);
		}
		if (mdwc->in_host_mode) {
			disable_irq_wake(mdwc->pwr_event_irq);
			disable_irq(mdwc->pwr_event_irq);
		}
		mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
	}

	dev_info(mdwc->dev, "DWC3 exited from low power mode\n");

	/* enable power evt irq for IN P3 detection */
	enable_irq(mdwc->pwr_event_irq);

	/* Enable core irq */
	if (dwc->irq)
		enable_irq(dwc->irq);

	/*
	 * Handle other power events that could not have been handled during
	 * Low Power Mode
	 */
	dwc3_pwr_event_handler(mdwc);

	return 0;
}
2144
2145/**
2146 * dwc3_ext_event_notify - callback to handle events from external transceiver
2147 *
2148 * Returns 0 on success
2149 */
2150static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2151{
2152 /* Flush processing any pending events before handling new ones */
2153 flush_delayed_work(&mdwc->sm_work);
2154
2155 if (mdwc->id_state == DWC3_ID_FLOAT) {
2156 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2157 set_bit(ID, &mdwc->inputs);
2158 } else {
2159 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2160 clear_bit(ID, &mdwc->inputs);
2161 }
2162
2163 if (mdwc->vbus_active && !mdwc->in_restart) {
2164 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2165 set_bit(B_SESS_VLD, &mdwc->inputs);
2166 } else {
2167 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2168 clear_bit(B_SESS_VLD, &mdwc->inputs);
2169 }
2170
2171 if (mdwc->suspend) {
2172 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2173 set_bit(B_SUSPEND, &mdwc->inputs);
2174 } else {
2175 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2176 clear_bit(B_SUSPEND, &mdwc->inputs);
2177 }
2178
2179 schedule_delayed_work(&mdwc->sm_work, 0);
2180}
2181
2182static void dwc3_resume_work(struct work_struct *w)
2183{
2184 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002185
2186 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2187
2188 /*
2189 * exit LPM first to meet resume timeline from device side.
2190 * resume_pending flag would prevent calling
2191 * dwc3_msm_resume() in case we are here due to system
2192 * wide resume without usb cable connected. This flag is set
2193 * only in case of power event irq in lpm.
2194 */
2195 if (mdwc->resume_pending) {
2196 dwc3_msm_resume(mdwc);
2197 mdwc->resume_pending = false;
2198 }
2199
Mayank Rana83ad5822016-08-09 14:17:22 -07002200 if (atomic_read(&mdwc->pm_suspended))
Mayank Rana511f3b22016-08-02 12:00:11 -07002201 /* let pm resume kick in resume work later */
2202 return;
Mayank Rana511f3b22016-08-02 12:00:11 -07002203 dwc3_ext_event_notify(mdwc);
2204}
2205
2206static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2207{
2208 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2209 u32 irq_stat, irq_clear = 0;
2210
2211 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2212 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2213
2214 /* Check for P3 events */
2215 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2216 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2217 /* Can't tell if entered or exit P3, so check LINKSTATE */
2218 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2219 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2220 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2221 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2222
2223 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2224 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2225 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2226 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2227 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2228 atomic_set(&mdwc->in_p3, 0);
2229 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2230 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2231 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2232 atomic_set(&mdwc->in_p3, 1);
2233 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2234 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2235 }
2236
2237 /* Clear L2 exit */
2238 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2239 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2240 irq_stat |= PWR_EVNT_LPM_OUT_L2_MASK;
2241 }
2242
2243 /* Handle exit from L1 events */
2244 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2245 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2246 __func__);
2247 if (usb_gadget_wakeup(&dwc->gadget))
2248 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2249 __func__);
2250 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2251 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2252 }
2253
2254 /* Unhandled events */
2255 if (irq_stat)
2256 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2257 __func__, irq_stat);
2258
2259 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2260}
2261
2262static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2263{
2264 struct dwc3_msm *mdwc = _mdwc;
2265 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2266
2267 dev_dbg(mdwc->dev, "%s\n", __func__);
2268
2269 if (atomic_read(&dwc->in_lpm))
2270 dwc3_resume_work(&mdwc->resume_work);
2271 else
2272 dwc3_pwr_event_handler(mdwc);
2273
Mayank Rana511f3b22016-08-02 12:00:11 -07002274 return IRQ_HANDLED;
2275}
2276
2277static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2278{
2279 struct dwc3_msm *mdwc = data;
2280 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2281
2282 dwc->t_pwr_evt_irq = ktime_get();
2283 dev_dbg(mdwc->dev, "%s received\n", __func__);
2284 /*
2285 * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to acertain
2286 * which interrupts have been triggered, as the clocks are disabled.
2287 * Resume controller by waking up pwr event irq thread.After re-enabling
2288 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2289 * all other power events.
2290 */
2291 if (atomic_read(&dwc->in_lpm)) {
2292 /* set this to call dwc3_msm_resume() */
2293 mdwc->resume_pending = true;
2294 return IRQ_WAKE_THREAD;
2295 }
2296
2297 dwc3_pwr_event_handler(mdwc);
2298 return IRQ_HANDLED;
2299}
2300
2301static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2302 unsigned long action, void *hcpu)
2303{
2304 uint32_t cpu = (uintptr_t)hcpu;
2305 struct dwc3_msm *mdwc =
2306 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2307
2308 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2309 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2310 cpu_to_affin, mdwc->irq_to_affin);
2311 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2312 }
2313
2314 return NOTIFY_OK;
2315}
2316
2317static void dwc3_otg_sm_work(struct work_struct *w);
2318
2319static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
2320{
2321 int ret;
2322
2323 mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
2324 if (IS_ERR(mdwc->dwc3_gdsc))
2325 mdwc->dwc3_gdsc = NULL;
2326
2327 mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
2328 if (IS_ERR(mdwc->xo_clk)) {
2329 dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
2330 __func__);
2331 ret = PTR_ERR(mdwc->xo_clk);
2332 return ret;
2333 }
2334 clk_set_rate(mdwc->xo_clk, 19200000);
2335
2336 mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
2337 if (IS_ERR(mdwc->iface_clk)) {
2338 dev_err(mdwc->dev, "failed to get iface_clk\n");
2339 ret = PTR_ERR(mdwc->iface_clk);
2340 return ret;
2341 }
2342
2343 /*
2344 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
2345 * run at 125Mhz in SSUSB mode and >60MHZ for HSUSB mode.
2346 * On newer platform it can run at 150MHz as well.
2347 */
2348 mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
2349 if (IS_ERR(mdwc->core_clk)) {
2350 dev_err(mdwc->dev, "failed to get core_clk\n");
2351 ret = PTR_ERR(mdwc->core_clk);
2352 return ret;
2353 }
2354
Amit Nischal4d278212016-06-06 17:54:34 +05302355 mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
2356 if (IS_ERR(mdwc->core_reset)) {
2357 dev_err(mdwc->dev, "failed to get core_reset\n");
2358 return PTR_ERR(mdwc->core_reset);
2359 }
2360
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302361 if (!of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
2362 (u32 *)&mdwc->core_clk_rate)) {
2363 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
2364 mdwc->core_clk_rate);
2365 } else {
2366 /*
2367 * Get Max supported clk frequency for USB Core CLK and request
2368 * to set the same.
2369 */
2370 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX);
2371 }
2372
Mayank Rana511f3b22016-08-02 12:00:11 -07002373 if (IS_ERR_VALUE(mdwc->core_clk_rate)) {
2374 dev_err(mdwc->dev, "fail to get core clk max freq.\n");
2375 } else {
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302376 dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
2377 mdwc->core_clk_rate);
Mayank Rana511f3b22016-08-02 12:00:11 -07002378 ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2379 if (ret)
2380 dev_err(mdwc->dev, "fail to set core_clk freq:%d\n",
2381 ret);
2382 }
2383
2384 mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
2385 if (IS_ERR(mdwc->sleep_clk)) {
2386 dev_err(mdwc->dev, "failed to get sleep_clk\n");
2387 ret = PTR_ERR(mdwc->sleep_clk);
2388 return ret;
2389 }
2390
2391 clk_set_rate(mdwc->sleep_clk, 32000);
2392 mdwc->utmi_clk_rate = 19200000;
2393 mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
2394 if (IS_ERR(mdwc->utmi_clk)) {
2395 dev_err(mdwc->dev, "failed to get utmi_clk\n");
2396 ret = PTR_ERR(mdwc->utmi_clk);
2397 return ret;
2398 }
2399
2400 clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
2401 mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
2402 if (IS_ERR(mdwc->bus_aggr_clk))
2403 mdwc->bus_aggr_clk = NULL;
2404
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302405 mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
2406 if (IS_ERR(mdwc->noc_aggr_clk))
2407 mdwc->noc_aggr_clk = NULL;
2408
Mayank Rana511f3b22016-08-02 12:00:11 -07002409 if (of_property_match_string(mdwc->dev->of_node,
2410 "clock-names", "cfg_ahb_clk") >= 0) {
2411 mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
2412 if (IS_ERR(mdwc->cfg_ahb_clk)) {
2413 ret = PTR_ERR(mdwc->cfg_ahb_clk);
2414 mdwc->cfg_ahb_clk = NULL;
2415 if (ret != -EPROBE_DEFER)
2416 dev_err(mdwc->dev,
2417 "failed to get cfg_ahb_clk ret %d\n",
2418 ret);
2419 return ret;
2420 }
2421 }
2422
2423 return 0;
2424}
2425
2426static int dwc3_msm_id_notifier(struct notifier_block *nb,
2427 unsigned long event, void *ptr)
2428{
2429 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002430 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002431 struct extcon_dev *edev = ptr;
2432 enum dwc3_id_state id;
2433 int cc_state;
Hemant Kumarde1df692016-04-26 19:36:48 -07002434 int speed;
Mayank Rana511f3b22016-08-02 12:00:11 -07002435
2436 if (!edev) {
2437 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2438 goto done;
2439 }
2440
2441 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2442
2443 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2444
2445 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2446 if (cc_state < 0)
2447 mdwc->typec_orientation = ORIENTATION_NONE;
2448 else
2449 mdwc->typec_orientation =
2450 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2451
Hemant Kumarde1df692016-04-26 19:36:48 -07002452 dev_dbg(mdwc->dev, "cc_state:%d", mdwc->typec_orientation);
2453
2454 speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
2455 dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
2456
Mayank Rana511f3b22016-08-02 12:00:11 -07002457 if (mdwc->id_state != id) {
2458 mdwc->id_state = id;
Mayank Rana511f3b22016-08-02 12:00:11 -07002459 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2460 }
2461
2462done:
2463 return NOTIFY_DONE;
2464}
2465
2466static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2467 unsigned long event, void *ptr)
2468{
2469 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2470 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2471 struct extcon_dev *edev = ptr;
2472 int cc_state;
Hemant Kumarde1df692016-04-26 19:36:48 -07002473 int speed;
Mayank Rana511f3b22016-08-02 12:00:11 -07002474
2475 if (!edev) {
2476 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2477 goto done;
2478 }
2479
2480 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2481
2482 if (mdwc->vbus_active == event)
2483 return NOTIFY_DONE;
2484
2485 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2486 if (cc_state < 0)
2487 mdwc->typec_orientation = ORIENTATION_NONE;
2488 else
2489 mdwc->typec_orientation =
2490 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2491
Hemant Kumarde1df692016-04-26 19:36:48 -07002492 dev_dbg(mdwc->dev, "cc_state:%d", mdwc->typec_orientation);
2493
2494 speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
2495 dwc->maximum_speed = (speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
2496
Mayank Rana511f3b22016-08-02 12:00:11 -07002497 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002498 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002499 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002500done:
2501 return NOTIFY_DONE;
2502}
2503
/*
 * dwc3_msm_extcon_register - hook up extcon notifiers for VBUS and ID.
 *
 * The "extcon" DT property may list one or two extcon devices. The first
 * serves EXTCON_USB (VBUS). If a second phandle exists it provides a
 * separate device for EXTCON_USB_HOST (ID); otherwise the SAME edev that
 * handled VBUS deliberately falls through to the ID registration below —
 * a single extcon device then serves both cable types.
 *
 * Returns 0 on success or when no "extcon" property exists; on failure,
 * unwinds the VBUS notifier (if registered) and returns a negative errno.
 */
static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
{
	struct device_node *node = mdwc->dev->of_node;
	struct extcon_dev *edev;
	int ret = 0;

	/* No extcon property: nothing to register, not an error */
	if (!of_property_read_bool(node, "extcon"))
		return 0;

	/* -ENODEV is tolerated (no VBUS edev); other errors abort */
	edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
		return PTR_ERR(edev);

	if (!IS_ERR(edev)) {
		mdwc->extcon_vbus = edev;
		mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB,
				&mdwc->vbus_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB\n");
			return ret;
		}
	}

	/* if a second phandle was provided, use it to get a separate edev */
	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
		edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
			ret = PTR_ERR(edev);
			goto err;
		}
	}

	/* edev is either the second device or (intentionally) the first */
	if (!IS_ERR(edev)) {
		mdwc->extcon_id = edev;
		mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
				&mdwc->id_nb);
		if (ret < 0) {
			dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
			goto err;
		}
	}

	return 0;
err:
	/* Unwind the VBUS notifier registered above, if any */
	if (mdwc->extcon_vbus)
		extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
				&mdwc->vbus_nb);
	return ret;
}
2555
2556static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
2557 char *buf)
2558{
2559 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2560
2561 if (mdwc->vbus_active)
2562 return snprintf(buf, PAGE_SIZE, "peripheral\n");
2563 if (mdwc->id_state == DWC3_ID_GROUND)
2564 return snprintf(buf, PAGE_SIZE, "host\n");
2565
2566 return snprintf(buf, PAGE_SIZE, "none\n");
2567}
2568
2569static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
2570 const char *buf, size_t count)
2571{
2572 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2573
2574 if (sysfs_streq(buf, "peripheral")) {
2575 mdwc->vbus_active = true;
2576 mdwc->id_state = DWC3_ID_FLOAT;
2577 } else if (sysfs_streq(buf, "host")) {
2578 mdwc->vbus_active = false;
2579 mdwc->id_state = DWC3_ID_GROUND;
2580 } else {
2581 mdwc->vbus_active = false;
2582 mdwc->id_state = DWC3_ID_FLOAT;
2583 }
2584
2585 dwc3_ext_event_notify(mdwc);
2586
2587 return count;
2588}
2589
2590static DEVICE_ATTR_RW(mode);
2591
2592static int dwc3_msm_probe(struct platform_device *pdev)
2593{
2594 struct device_node *node = pdev->dev.of_node, *dwc3_node;
2595 struct device *dev = &pdev->dev;
2596 struct dwc3_msm *mdwc;
2597 struct dwc3 *dwc;
2598 struct resource *res;
2599 void __iomem *tcsr;
2600 bool host_mode;
2601 int ret = 0;
2602 int ext_hub_reset_gpio;
2603 u32 val;
2604
2605 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
2606 if (!mdwc)
2607 return -ENOMEM;
2608
2609 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
2610 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
2611 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2612 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
2613 return -EOPNOTSUPP;
2614 }
2615 }
2616
2617 platform_set_drvdata(pdev, mdwc);
2618 mdwc->dev = &pdev->dev;
2619
2620 INIT_LIST_HEAD(&mdwc->req_complete_list);
2621 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
2622 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
2623 INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07002624 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07002625 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
2626
2627 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
2628 if (!mdwc->dwc3_wq) {
2629 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
2630 return -ENOMEM;
2631 }
2632
2633 /* Get all clks and gdsc reference */
2634 ret = dwc3_msm_get_clk_gdsc(mdwc);
2635 if (ret) {
2636 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
2637 return ret;
2638 }
2639
2640 mdwc->id_state = DWC3_ID_FLOAT;
2641 set_bit(ID, &mdwc->inputs);
2642
2643 mdwc->charging_disabled = of_property_read_bool(node,
2644 "qcom,charging-disabled");
2645
2646 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
2647 &mdwc->lpm_to_suspend_delay);
2648 if (ret) {
2649 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
2650 mdwc->lpm_to_suspend_delay = 0;
2651 }
2652
2653 /*
2654 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
2655 * DP and DM linestate transitions during low power mode.
2656 */
2657 mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
2658 if (mdwc->hs_phy_irq < 0) {
2659 dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
2660 ret = -EINVAL;
2661 goto err;
2662 } else {
2663 irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
2664 ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
2665 msm_dwc3_pwr_irq,
2666 msm_dwc3_pwr_irq_thread,
2667 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2668 | IRQF_ONESHOT, "hs_phy_irq", mdwc);
2669 if (ret) {
2670 dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
2671 ret);
2672 goto err;
2673 }
2674 }
2675
2676 mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
2677 if (mdwc->ss_phy_irq < 0) {
2678 dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
2679 } else {
2680 irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
2681 ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
2682 msm_dwc3_pwr_irq,
2683 msm_dwc3_pwr_irq_thread,
2684 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2685 | IRQF_ONESHOT, "ss_phy_irq", mdwc);
2686 if (ret) {
2687 dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
2688 ret);
2689 goto err;
2690 }
2691 }
2692
2693 mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
2694 if (mdwc->pwr_event_irq < 0) {
2695 dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
2696 ret = -EINVAL;
2697 goto err;
2698 } else {
2699 /* will be enabled in dwc3_msm_resume() */
2700 irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
2701 ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
2702 msm_dwc3_pwr_irq,
2703 msm_dwc3_pwr_irq_thread,
2704 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
2705 "msm_dwc3", mdwc);
2706 if (ret) {
2707 dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
2708 ret);
2709 goto err;
2710 }
2711 }
2712
2713 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
2714 if (!res) {
2715 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
2716 } else {
2717 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
2718 resource_size(res));
2719 if (IS_ERR_OR_NULL(tcsr)) {
2720 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
2721 } else {
2722 /* Enable USB3 on the primary USB port. */
2723 writel_relaxed(0x1, tcsr);
2724 /*
2725 * Ensure that TCSR write is completed before
2726 * USB registers initialization.
2727 */
2728 mb();
2729 }
2730 }
2731
2732 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
2733 if (!res) {
2734 dev_err(&pdev->dev, "missing memory base resource\n");
2735 ret = -ENODEV;
2736 goto err;
2737 }
2738
2739 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
2740 resource_size(res));
2741 if (!mdwc->base) {
2742 dev_err(&pdev->dev, "ioremap failed\n");
2743 ret = -ENODEV;
2744 goto err;
2745 }
2746
2747 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2748 "ahb2phy_base");
2749 if (res) {
2750 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
2751 res->start, resource_size(res));
2752 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
2753 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
2754 mdwc->ahb2phy_base = NULL;
2755 } else {
2756 /*
2757 * On some targets cfg_ahb_clk depends upon usb gdsc
2758 * regulator. If cfg_ahb_clk is enabled without
2759 * turning on usb gdsc regulator clk is stuck off.
2760 */
2761 dwc3_msm_config_gdsc(mdwc, 1);
2762 clk_prepare_enable(mdwc->cfg_ahb_clk);
2763 /* Configure AHB2PHY for one wait state read/write*/
2764 val = readl_relaxed(mdwc->ahb2phy_base +
2765 PERIPH_SS_AHB2PHY_TOP_CFG);
2766 if (val != ONE_READ_WRITE_WAIT) {
2767 writel_relaxed(ONE_READ_WRITE_WAIT,
2768 mdwc->ahb2phy_base +
2769 PERIPH_SS_AHB2PHY_TOP_CFG);
2770 /* complete above write before using USB PHY */
2771 mb();
2772 }
2773 clk_disable_unprepare(mdwc->cfg_ahb_clk);
2774 dwc3_msm_config_gdsc(mdwc, 0);
2775 }
2776 }
2777
2778 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
2779 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
2780 if (IS_ERR(mdwc->dbm)) {
2781 dev_err(&pdev->dev, "unable to get dbm device\n");
2782 ret = -EPROBE_DEFER;
2783 goto err;
2784 }
2785 /*
2786 * Add power event if the dbm indicates coming out of L1
2787 * by interrupt
2788 */
2789 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
2790 if (!mdwc->pwr_event_irq) {
2791 dev_err(&pdev->dev,
2792 "need pwr_event_irq exiting L1\n");
2793 ret = -EINVAL;
2794 goto err;
2795 }
2796 }
2797 }
2798
2799 ext_hub_reset_gpio = of_get_named_gpio(node,
2800 "qcom,ext-hub-reset-gpio", 0);
2801
2802 if (gpio_is_valid(ext_hub_reset_gpio)
2803 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
2804 "qcom,ext-hub-reset-gpio"))) {
2805 /* reset external hub */
2806 gpio_direction_output(ext_hub_reset_gpio, 1);
2807 /*
2808 * Hub reset should be asserted for minimum 5microsec
2809 * before deasserting.
2810 */
2811 usleep_range(5, 1000);
2812 gpio_direction_output(ext_hub_reset_gpio, 0);
2813 }
2814
2815 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
2816 &mdwc->tx_fifo_size))
2817 dev_err(&pdev->dev,
2818 "unable to read platform data tx fifo size\n");
2819
2820 mdwc->disable_host_mode_pm = of_property_read_bool(node,
2821 "qcom,disable-host-mode-pm");
2822
2823 dwc3_set_notifier(&dwc3_msm_notify_event);
2824
2825 /* Assumes dwc3 is the first DT child of dwc3-msm */
2826 dwc3_node = of_get_next_available_child(node, NULL);
2827 if (!dwc3_node) {
2828 dev_err(&pdev->dev, "failed to find dwc3 child\n");
2829 ret = -ENODEV;
2830 goto err;
2831 }
2832
2833 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2834 if (ret) {
2835 dev_err(&pdev->dev,
2836 "failed to add create dwc3 core\n");
2837 of_node_put(dwc3_node);
2838 goto err;
2839 }
2840
2841 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
2842 of_node_put(dwc3_node);
2843 if (!mdwc->dwc3) {
2844 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
2845 goto put_dwc3;
2846 }
2847
2848 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
2849 "usb-phy", 0);
2850 if (IS_ERR(mdwc->hs_phy)) {
2851 dev_err(&pdev->dev, "unable to get hsphy device\n");
2852 ret = PTR_ERR(mdwc->hs_phy);
2853 goto put_dwc3;
2854 }
2855 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
2856 "usb-phy", 1);
2857 if (IS_ERR(mdwc->ss_phy)) {
2858 dev_err(&pdev->dev, "unable to get ssphy device\n");
2859 ret = PTR_ERR(mdwc->ss_phy);
2860 goto put_dwc3;
2861 }
2862
2863 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
2864 if (mdwc->bus_scale_table) {
2865 mdwc->bus_perf_client =
2866 msm_bus_scale_register_client(mdwc->bus_scale_table);
2867 }
2868
2869 dwc = platform_get_drvdata(mdwc->dwc3);
2870 if (!dwc) {
2871 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
2872 goto put_dwc3;
2873 }
2874
2875 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
2876 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
2877
2878 if (cpu_to_affin)
2879 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
2880
2881 /*
2882 * Clocks and regulators will not be turned on until the first time
2883 * runtime PM resume is called. This is to allow for booting up with
2884 * charger already connected so as not to disturb PHY line states.
2885 */
2886 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
2887 atomic_set(&dwc->in_lpm, 1);
2888 pm_runtime_set_suspended(mdwc->dev);
2889 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
2890 pm_runtime_use_autosuspend(mdwc->dev);
2891 pm_runtime_enable(mdwc->dev);
2892 device_init_wakeup(mdwc->dev, 1);
2893
2894 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
2895 pm_runtime_get_noresume(mdwc->dev);
2896
2897 ret = dwc3_msm_extcon_register(mdwc);
2898 if (ret)
2899 goto put_dwc3;
2900
2901 /* Update initial VBUS/ID state from extcon */
2902 if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
2903 EXTCON_USB))
2904 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
2905 if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
2906 EXTCON_USB_HOST))
2907 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
2908
2909 device_create_file(&pdev->dev, &dev_attr_mode);
2910
2911 schedule_delayed_work(&mdwc->sm_work, 0);
2912
2913 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
2914 if (!dwc->is_drd && host_mode) {
2915 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
2916 mdwc->id_state = DWC3_ID_GROUND;
2917 dwc3_ext_event_notify(mdwc);
2918 }
2919
2920 return 0;
2921
2922put_dwc3:
2923 platform_device_put(mdwc->dwc3);
2924 if (mdwc->bus_perf_client)
2925 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
2926err:
2927 return ret;
2928}
2929
/*
 * device_for_each_child() callback used at driver removal: tears down one
 * child device. Always reports success so iteration visits every child.
 */
static int dwc3_msm_remove_children(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}
2935
/*
 * dwc3_msm_remove - platform driver removal hook.
 *
 * Undoes probe in roughly reverse order: sysfs attribute, CPU notifier,
 * state-machine work, child devices, runtime PM, bus scaling, VBUS
 * regulator, wakeup IRQs, clocks, and finally the GDSC power domain.
 * Ordering matters: clocks must be running (via pm_runtime_get_sync or
 * the manual fallback below) before registers/PHYs are touched.
 */
static int dwc3_msm_remove(struct platform_device *pdev)
{
	struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
	int ret_pm;

	device_remove_file(&pdev->dev, &dev_attr_mode);

	if (cpu_to_affin)
		unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);

	/*
	 * In case of system suspend, pm_runtime_get_sync fails.
	 * Hence turn ON the clocks manually.
	 */
	ret_pm = pm_runtime_get_sync(mdwc->dev);
	if (ret_pm < 0) {
		dev_err(mdwc->dev,
			"pm_runtime_get_sync failed with %d\n", ret_pm);
		/* optional clocks may be absent on some targets; guarded */
		if (mdwc->noc_aggr_clk)
			clk_prepare_enable(mdwc->noc_aggr_clk);
		clk_prepare_enable(mdwc->utmi_clk);
		clk_prepare_enable(mdwc->core_clk);
		clk_prepare_enable(mdwc->iface_clk);
		clk_prepare_enable(mdwc->sleep_clk);
		if (mdwc->bus_aggr_clk)
			clk_prepare_enable(mdwc->bus_aggr_clk);
		clk_prepare_enable(mdwc->xo_clk);
	}

	/* stop the OTG state machine before dismantling anything it touches */
	cancel_delayed_work_sync(&mdwc->sm_work);

	if (mdwc->hs_phy)
		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
	/* drop the reference taken on the dwc3 core platform device */
	platform_device_put(mdwc->dwc3);
	device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);

	pm_runtime_disable(mdwc->dev);
	pm_runtime_barrier(mdwc->dev);
	pm_runtime_put_sync(mdwc->dev);
	pm_runtime_set_suspended(mdwc->dev);
	device_wakeup_disable(mdwc->dev);

	if (mdwc->bus_perf_client)
		msm_bus_scale_unregister_client(mdwc->bus_perf_client);

	if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
		regulator_disable(mdwc->vbus_reg);

	disable_irq(mdwc->hs_phy_irq);
	if (mdwc->ss_phy_irq)
		disable_irq(mdwc->ss_phy_irq);
	disable_irq(mdwc->pwr_event_irq);

	/*
	 * NOTE(review): noc_aggr_clk/bus_aggr_clk are enabled in the manual
	 * fallback above but are not disabled here — looks asymmetric;
	 * confirm whether dwc3_msm_suspend() balances them elsewhere.
	 */
	clk_disable_unprepare(mdwc->utmi_clk);
	/* drop core clock back to its 19.2 MHz base rate before gating */
	clk_set_rate(mdwc->core_clk, 19200000);
	clk_disable_unprepare(mdwc->core_clk);
	clk_disable_unprepare(mdwc->iface_clk);
	clk_disable_unprepare(mdwc->sleep_clk);
	clk_disable_unprepare(mdwc->xo_clk);
	clk_put(mdwc->xo_clk);

	/* power off the USB GDSC last */
	dwc3_msm_config_gdsc(mdwc, 0);

	return 0;
}
3001
3002#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
3003
3004/**
 * dwc3_otg_start_host - helper function for starting/stopping the host
3006 * controller driver.
3007 *
3008 * @mdwc: Pointer to the dwc3_msm structure.
3009 * @on: start / stop the host controller driver.
3010 *
3011 * Returns 0 on success otherwise negative errno.
3012 */
static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
	int ret = 0;

	/* host mode is impossible without the xhci child device */
	if (!dwc->xhci)
		return -EINVAL;

	/*
	 * The vbus_reg pointer could have multiple values
	 * NULL: regulator_get() hasn't been called, or was previously deferred
	 * IS_ERR: regulator could not be obtained, so skip using it
	 * Valid pointer otherwise
	 */
	if (!mdwc->vbus_reg) {
		mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
				"vbus_dwc3");
		if (IS_ERR(mdwc->vbus_reg) &&
				PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
			/* regulators may not be ready, so retry again later */
			mdwc->vbus_reg = NULL;
			return -EPROBE_DEFER;
		}
	}

	if (on) {
		dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);

		/* flag host mode on the PHYs before resuming the controller */
		mdwc->hs_phy->flags |= PHY_HOST_MODE;
		if (dwc->maximum_speed == USB_SPEED_SUPER)
			mdwc->ss_phy->flags |= PHY_HOST_MODE;

		pm_runtime_get_sync(mdwc->dev);
		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
		/* regulator is optional; ret stays 0 if it was unavailable */
		if (!IS_ERR(mdwc->vbus_reg))
			ret = regulator_enable(mdwc->vbus_reg);
		if (ret) {
			/* roll back PHY flags and the PM reference on failure */
			dev_err(mdwc->dev, "unable to enable vbus_reg\n");
			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
			pm_runtime_put_sync(mdwc->dev);
			return ret;
		}

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);

		/*
		 * FIXME If micro A cable is disconnected during system suspend,
		 * xhci platform device will be removed before runtime pm is
		 * enabled for xhci device. Due to this, disable_depth becomes
		 * greater than one and runtimepm is not enabled for next microA
		 * connect. Fix this by calling pm_runtime_init for xhci device.
		 */
		pm_runtime_init(&dwc->xhci->dev);
		ret = platform_device_add(dwc->xhci);
		if (ret) {
			dev_err(mdwc->dev,
				"%s: failed to add XHCI pdev ret=%d\n",
				__func__, ret);
			/* undo VBUS, PHY flags and PM reference taken above */
			if (!IS_ERR(mdwc->vbus_reg))
				regulator_disable(mdwc->vbus_reg);
			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
			pm_runtime_put_sync(mdwc->dev);
			return ret;
		}

		/*
		 * In some cases it is observed that USB PHY is not going into
		 * suspend with host mode suspend functionality. Hence disable
		 * XHCI's runtime PM here if disable_host_mode_pm is set.
		 */
		if (mdwc->disable_host_mode_pm)
			pm_runtime_disable(&dwc->xhci->dev);

		mdwc->in_host_mode = true;
		dwc3_usb3_phy_suspend(dwc, true);

		/* xHCI should have incremented child count as necessary */
		pm_runtime_mark_last_busy(mdwc->dev);
		pm_runtime_put_sync_autosuspend(mdwc->dev);
	} else {
		dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);

		/* drop VBUS first; bail out if the regulator refuses */
		if (!IS_ERR(mdwc->vbus_reg))
			ret = regulator_disable(mdwc->vbus_reg);
		if (ret) {
			dev_err(mdwc->dev, "unable to disable vbus_reg\n");
			return ret;
		}

		pm_runtime_get_sync(mdwc->dev);
		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
		mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
		platform_device_del(dwc->xhci);

		/*
		 * Perform USB hardware RESET (both core reset and DBM reset)
		 * when moving from host to peripheral. This is required for
		 * peripheral mode to work.
		 */
		dwc3_msm_block_reset(mdwc, true);

		dwc3_usb3_phy_suspend(dwc, false);
		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);

		mdwc->in_host_mode = false;

		/* re-init core and OTG registers as block reset clears these */
		dwc3_post_host_reset_core_init(dwc);
		pm_runtime_mark_last_busy(mdwc->dev);
		pm_runtime_put_sync_autosuspend(mdwc->dev);
	}

	return 0;
}
3130
3131static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3132{
3133 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3134
3135 /* Update OTG VBUS Valid from HSPHY to controller */
3136 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3137 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3138 UTMI_OTG_VBUS_VALID,
3139 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3140
3141 /* Update only if Super Speed is supported */
3142 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3143 /* Update VBUS Valid from SSPHY to controller */
3144 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3145 LANE0_PWR_PRESENT,
3146 vbus_present ? LANE0_PWR_PRESENT : 0);
3147 }
3148}
3149
3150/**
3151 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3152 *
3153 * @mdwc: Pointer to the dwc3_msm structure.
3154 * @on: Turn ON/OFF the gadget.
3155 *
3156 * Returns 0 on success otherwise negative errno.
3157 */
static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
{
	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);

	/* keep the device resumed while (re)configuring the gadget */
	pm_runtime_get_sync(mdwc->dev);

	if (on) {
		dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
					__func__, dwc->gadget.name);

		/* tell controller + PHYs that VBUS is present before reset */
		dwc3_override_vbus_status(mdwc, true);
		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
		usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);

		/*
		 * Core reset is not required during start peripheral. Only
		 * DBM reset is required, hence perform only DBM reset here.
		 */
		dwc3_msm_block_reset(mdwc, false);

		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
		usb_gadget_vbus_connect(&dwc->gadget);
	} else {
		dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
					__func__, dwc->gadget.name);
		/* reverse order: detach gadget first, then drop PHY state */
		usb_gadget_vbus_disconnect(&dwc->gadget);
		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
		usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
		dwc3_override_vbus_status(mdwc, false);
		dwc3_usb3_phy_suspend(dwc, false);
	}

	pm_runtime_put_sync(mdwc->dev);

	return 0;
}
3194
3195static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3196{
Jack Pham8caff352016-08-19 16:33:55 -07003197 union power_supply_propval pval = {0};
Jack Phamd72bafe2016-08-09 11:07:22 -07003198 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07003199
3200 if (mdwc->charging_disabled)
3201 return 0;
3202
3203 if (mdwc->max_power == mA)
3204 return 0;
3205
3206 if (!mdwc->usb_psy) {
3207 mdwc->usb_psy = power_supply_get_by_name("usb");
3208 if (!mdwc->usb_psy) {
3209 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3210 return -ENODEV;
3211 }
3212 }
3213
Jack Pham8caff352016-08-19 16:33:55 -07003214 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_TYPE, &pval);
3215 if (pval.intval != POWER_SUPPLY_TYPE_USB)
3216 return 0;
3217
Mayank Rana511f3b22016-08-02 12:00:11 -07003218 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3219
Mayank Rana511f3b22016-08-02 12:00:11 -07003220 /* Set max current limit in uA */
Jack Pham8caff352016-08-19 16:33:55 -07003221 pval.intval = 1000 * mA;
Jack Phamd72bafe2016-08-09 11:07:22 -07003222 ret = power_supply_set_property(mdwc->usb_psy,
3223 POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
3224 if (ret) {
3225 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3226 return ret;
3227 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003228
3229 mdwc->max_power = mA;
3230 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003231}
3232
3233
3234/**
3235 * dwc3_otg_sm_work - workqueue function.
3236 *
3237 * @w: Pointer to the dwc3 otg workqueue
3238 *
 * NOTE: After any change in otg_state, we must reschedule the state machine.
3240 */
static void dwc3_otg_sm_work(struct work_struct *w)
{
	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
	struct dwc3 *dwc = NULL;
	bool work = 0;	/* set to reschedule this work immediately/after delay */
	int ret = 0;
	unsigned long delay = 0;
	const char *state;

	if (mdwc->dwc3)
		dwc = platform_get_drvdata(mdwc->dwc3);

	/* dwc3 core may not be bound yet; bail out rather than crash */
	if (!dwc) {
		dev_err(mdwc->dev, "dwc is NULL.\n");
		return;
	}

	state = usb_otg_state_string(mdwc->otg_state);
	dev_dbg(mdwc->dev, "%s state\n", state);

	/* Check OTG state */
	switch (mdwc->otg_state) {
	case OTG_STATE_UNDEFINED:
		/* Do nothing if no cable connected */
		if (test_bit(ID, &mdwc->inputs) &&
				!test_bit(B_SESS_VLD, &mdwc->inputs))
			break;

		mdwc->otg_state = OTG_STATE_B_IDLE;
		/* fall-through */
	case OTG_STATE_B_IDLE:
		if (!test_bit(ID, &mdwc->inputs)) {
			/* ID grounded: we are the A-device (host) */
			dev_dbg(mdwc->dev, "!id\n");
			mdwc->otg_state = OTG_STATE_A_IDLE;
			work = 1;
		} else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "b_sess_vld\n");
			/*
			 * Increment pm usage count upon cable connect. Count
			 * is decremented in OTG_STATE_B_PERIPHERAL state on
			 * cable disconnect or in bus suspend.
			 */
			pm_runtime_get_sync(mdwc->dev);
			dwc3_otg_start_peripheral(mdwc, 1);
			mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
			work = 1;
		} else {
			/* no cable: tell the charger to draw nothing */
			dwc3_msm_gadget_vbus_draw(mdwc, 0);
			dev_dbg(mdwc->dev, "Cable disconnected\n");
		}
		break;

	case OTG_STATE_B_PERIPHERAL:
		if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
				!test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "!id || !bsv\n");
			mdwc->otg_state = OTG_STATE_B_IDLE;
			dwc3_otg_start_peripheral(mdwc, 0);
			/*
			 * Decrement pm usage count upon cable disconnect
			 * which was incremented upon cable connect in
			 * OTG_STATE_B_IDLE state
			 */
			pm_runtime_put_sync(mdwc->dev);
			work = 1;
		} else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
			test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BPER bsv && susp\n");
			mdwc->otg_state = OTG_STATE_B_SUSPEND;
			/*
			 * Decrement pm usage count upon bus suspend.
			 * Count was incremented either upon cable
			 * connect in OTG_STATE_B_IDLE or host
			 * initiated resume after bus suspend in
			 * OTG_STATE_B_SUSPEND state
			 */
			pm_runtime_mark_last_busy(mdwc->dev);
			pm_runtime_put_autosuspend(mdwc->dev);
		}
		break;

	case OTG_STATE_B_SUSPEND:
		if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
			mdwc->otg_state = OTG_STATE_B_IDLE;
			dwc3_otg_start_peripheral(mdwc, 0);
		} else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "BSUSP !susp\n");
			mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
			/*
			 * Increment pm usage count upon host
			 * initiated resume. Count was decremented
			 * upon bus suspend in
			 * OTG_STATE_B_PERIPHERAL state.
			 */
			pm_runtime_get_sync(mdwc->dev);
		}
		break;

	case OTG_STATE_A_IDLE:
		/* Switch to A-Device*/
		if (test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "id\n");
			mdwc->otg_state = OTG_STATE_B_IDLE;
			mdwc->vbus_retry_count = 0;
			work = 1;
		} else {
			mdwc->otg_state = OTG_STATE_A_HOST;
			ret = dwc3_otg_start_host(mdwc, 1);
			if ((ret == -EPROBE_DEFER) &&
					mdwc->vbus_retry_count < 3) {
				/*
				 * Get regulator failed as regulator driver is
				 * not up yet. Will try to start host after 1sec
				 */
				mdwc->otg_state = OTG_STATE_A_IDLE;
				dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
				delay = VBUS_REG_CHECK_DELAY;
				work = 1;
				mdwc->vbus_retry_count++;
			} else if (ret) {
				dev_err(mdwc->dev, "unable to start host\n");
				mdwc->otg_state = OTG_STATE_A_IDLE;
				goto ret;
			}
		}
		break;

	case OTG_STATE_A_HOST:
		if (test_bit(ID, &mdwc->inputs)) {
			dev_dbg(mdwc->dev, "id\n");
			dwc3_otg_start_host(mdwc, 0);
			mdwc->otg_state = OTG_STATE_B_IDLE;
			mdwc->vbus_retry_count = 0;
			work = 1;
		} else {
			dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
			/* dwc is known non-NULL here (checked above) */
			if (dwc)
				pm_runtime_resume(&dwc->xhci->dev);
		}
		break;

	default:
		dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);

	}

	if (work)
		schedule_delayed_work(&mdwc->sm_work, delay);

ret:
	return;
}
3394
3395#ifdef CONFIG_PM_SLEEP
3396static int dwc3_msm_pm_suspend(struct device *dev)
3397{
3398 int ret = 0;
3399 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3400 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3401
3402 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07003403
3404 flush_workqueue(mdwc->dwc3_wq);
3405 if (!atomic_read(&dwc->in_lpm)) {
3406 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
3407 return -EBUSY;
3408 }
3409
3410 ret = dwc3_msm_suspend(mdwc);
3411 if (!ret)
3412 atomic_set(&mdwc->pm_suspended, 1);
3413
3414 return ret;
3415}
3416
3417static int dwc3_msm_pm_resume(struct device *dev)
3418{
3419 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3420
3421 dev_dbg(dev, "dwc3-msm PM resume\n");
3422
Mayank Rana511f3b22016-08-02 12:00:11 -07003423 /* flush to avoid race in read/write of pm_suspended */
3424 flush_workqueue(mdwc->dwc3_wq);
3425 atomic_set(&mdwc->pm_suspended, 0);
3426
3427 /* kick in otg state machine */
3428 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
3429
3430 return 0;
3431}
3432#endif
3433
3434#ifdef CONFIG_PM
/*
 * Runtime-PM idle callback: returning 0 allows the PM core to proceed
 * with runtime suspend.
 */
static int dwc3_msm_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "DWC3-msm runtime idle\n");

	return 0;
}
3441
/* Runtime-PM suspend callback: delegates to the low-power-mode entry path. */
static int dwc3_msm_runtime_suspend(struct device *dev)
{
	struct dwc3_msm *glue = dev_get_drvdata(dev);

	dev_dbg(dev, "DWC3-msm runtime suspend\n");

	return dwc3_msm_suspend(glue);
}
3450
/* Runtime-PM resume callback: delegates to the low-power-mode exit path. */
static int dwc3_msm_runtime_resume(struct device *dev)
{
	struct dwc3_msm *glue = dev_get_drvdata(dev);

	dev_dbg(dev, "DWC3-msm runtime resume\n");

	return dwc3_msm_resume(glue);
}
3459#endif
3460
/* PM operations: system sleep hooks plus runtime PM (suspend/resume/idle) */
static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
	SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
				dwc3_msm_runtime_idle)
};
3466
3467static const struct of_device_id of_dwc3_matach[] = {
3468 {
3469 .compatible = "qcom,dwc-usb3-msm",
3470 },
3471 { },
3472};
3473MODULE_DEVICE_TABLE(of, of_dwc3_matach);
3474
3475static struct platform_driver dwc3_msm_driver = {
3476 .probe = dwc3_msm_probe,
3477 .remove = dwc3_msm_remove,
3478 .driver = {
3479 .name = "msm-dwc3",
3480 .pm = &dwc3_msm_dev_pm_ops,
3481 .of_match_table = of_dwc3_matach,
3482 },
3483};
3484
3485MODULE_LICENSE("GPL v2");
3486MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
3487
3488static int dwc3_msm_init(void)
3489{
3490 return platform_driver_register(&dwc3_msm_driver);
3491}
3492module_init(dwc3_msm_init);
3493
/* Module exit point: unregister the platform driver. */
static void __exit dwc3_msm_exit(void)
{
	platform_driver_unregister(&dwc3_msm_driver);
}
module_exit(dwc3_msm_exit);