blob: 792d6fe3b8f61504c261bec04b08b777847088bb [file] [log] [blame]
Mayank Rana511f3b22016-08-02 12:00:11 -07001/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/clk.h>
26#include <linux/io.h>
27#include <linux/module.h>
28#include <linux/types.h>
29#include <linux/delay.h>
30#include <linux/of.h>
31#include <linux/of_platform.h>
32#include <linux/of_gpio.h>
33#include <linux/list.h>
34#include <linux/uaccess.h>
35#include <linux/usb/ch9.h>
36#include <linux/usb/gadget.h>
37#include <linux/usb/of.h>
38#include <linux/usb/msm_hsusb.h>
39#include <linux/regulator/consumer.h>
40#include <linux/pm_wakeup.h>
41#include <linux/power_supply.h>
42#include <linux/cdev.h>
43#include <linux/completion.h>
44#include <linux/clk/msm-clk.h>
45#include <linux/msm-bus.h>
46#include <linux/irq.h>
47#include <linux/extcon.h>
48
49#include "power.h"
50#include "core.h"
51#include "gadget.h"
52#include "dbm.h"
53#include "debug.h"
54#include "xhci.h"
55
56/* time out to wait for USB cable status notification (in ms)*/
57#define SM_INIT_TIMEOUT 30000
58
59/* AHB2PHY register offsets */
60#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
61
62/* AHB2PHY read/write waite value */
63#define ONE_READ_WRITE_WAIT 0x11
64
65/* cpu to fix usb interrupt */
66static int cpu_to_affin;
67module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
68MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
69
70/* XHCI registers */
71#define USB3_HCSPARAMS1 (0x4)
72#define USB3_PORTSC (0x420)
73
74/**
75 * USB QSCRATCH Hardware registers
76 *
77 */
78#define QSCRATCH_REG_OFFSET (0x000F8800)
79#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
80#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
81#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
82#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
83
84#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
85#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
86#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
87#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
88#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
89
90/* QSCRATCH_GENERAL_CFG register bit offset */
91#define PIPE_UTMI_CLK_SEL BIT(0)
92#define PIPE3_PHYSTATUS_SW BIT(3)
93#define PIPE_UTMI_CLK_DIS BIT(8)
94
95#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
96#define UTMI_OTG_VBUS_VALID BIT(20)
97#define SW_SESSVLD_SEL BIT(28)
98
99#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
100#define LANE0_PWR_PRESENT BIT(24)
101
102/* GSI related registers */
103#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
104#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
105
106#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
107#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
108#define GSI_CLK_EN_MASK BIT(12)
109#define BLOCK_GSI_WR_GO_MASK BIT(1)
110#define GSI_EN_MASK BIT(0)
111
112#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
113#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
114#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
115#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
116
117#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
118#define GSI_WR_CTRL_STATE_MASK BIT(15)
119
120struct dwc3_msm_req_complete {
121 struct list_head list_item;
122 struct usb_request *req;
123 void (*orig_complete)(struct usb_ep *ep,
124 struct usb_request *req);
125};
126
127enum dwc3_id_state {
128 DWC3_ID_GROUND = 0,
129 DWC3_ID_FLOAT,
130};
131
132/* for type c cable */
133enum plug_orientation {
134 ORIENTATION_NONE,
135 ORIENTATION_CC1,
136 ORIENTATION_CC2,
137};
138
139/* Input bits to state machine (mdwc->inputs) */
140
141#define ID 0
142#define B_SESS_VLD 1
143#define B_SUSPEND 2
144
145struct dwc3_msm {
146 struct device *dev;
147 void __iomem *base;
148 void __iomem *ahb2phy_base;
149 struct platform_device *dwc3;
150 const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
151 struct list_head req_complete_list;
152 struct clk *xo_clk;
153 struct clk *core_clk;
154 long core_clk_rate;
155 struct clk *iface_clk;
156 struct clk *sleep_clk;
157 struct clk *utmi_clk;
158 unsigned int utmi_clk_rate;
159 struct clk *utmi_clk_src;
160 struct clk *bus_aggr_clk;
161 struct clk *cfg_ahb_clk;
162 struct regulator *dwc3_gdsc;
163
164 struct usb_phy *hs_phy, *ss_phy;
165
166 struct dbm *dbm;
167
168 /* VBUS regulator for host mode */
169 struct regulator *vbus_reg;
170 int vbus_retry_count;
171 bool resume_pending;
172 atomic_t pm_suspended;
173 int hs_phy_irq;
174 int ss_phy_irq;
175 struct work_struct resume_work;
176 struct work_struct restart_usb_work;
177 bool in_restart;
178 struct workqueue_struct *dwc3_wq;
179 struct delayed_work sm_work;
180 unsigned long inputs;
181 unsigned int max_power;
182 bool charging_disabled;
183 enum usb_otg_state otg_state;
184 enum usb_chg_state chg_state;
185 struct work_struct bus_vote_w;
186 unsigned int bus_vote;
187 u32 bus_perf_client;
188 struct msm_bus_scale_pdata *bus_scale_table;
189 struct power_supply *usb_psy;
190 bool in_host_mode;
191 unsigned int tx_fifo_size;
192 bool vbus_active;
193 bool suspend;
194 bool disable_host_mode_pm;
195 enum dwc3_id_state id_state;
196 unsigned long lpm_flags;
197#define MDWC3_SS_PHY_SUSPEND BIT(0)
198#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY BIT(1)
199#define MDWC3_POWER_COLLAPSE BIT(2)
200
201 unsigned int irq_to_affin;
202 struct notifier_block dwc3_cpu_notifier;
203
204 struct extcon_dev *extcon_vbus;
205 struct extcon_dev *extcon_id;
206 struct notifier_block vbus_nb;
207 struct notifier_block id_nb;
208
209 int pwr_event_irq;
210 atomic_t in_p3;
211 unsigned int lpm_to_suspend_delay;
212 bool init;
213 enum plug_orientation typec_orientation;
214};
215
216#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
217#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
218#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
219
220#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
221#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
222#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
223
224#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
225#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
226#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
227
228#define DSTS_CONNECTSPD_SS 0x4
229
230
231static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
232static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
233
234/**
235 *
236 * Read register with debug info.
237 *
238 * @base - DWC3 base virtual address.
239 * @offset - register offset.
240 *
241 * @return u32
242 */
243static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
244{
245 u32 val = ioread32(base + offset);
246 return val;
247}
248
249/**
250 * Read register masked field with debug info.
251 *
252 * @base - DWC3 base virtual address.
253 * @offset - register offset.
254 * @mask - register bitmask.
255 *
256 * @return u32
257 */
258static inline u32 dwc3_msm_read_reg_field(void *base,
259 u32 offset,
260 const u32 mask)
261{
262 u32 shift = find_first_bit((void *)&mask, 32);
263 u32 val = ioread32(base + offset);
264
265 val &= mask; /* clear other bits */
266 val >>= shift;
267 return val;
268}
269
270/**
271 *
272 * Write register with debug info.
273 *
274 * @base - DWC3 base virtual address.
275 * @offset - register offset.
276 * @val - value to write.
277 *
278 */
279static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
280{
281 iowrite32(val, base + offset);
282}
283
284/**
285 * Write register masked field with debug info.
286 *
287 * @base - DWC3 base virtual address.
288 * @offset - register offset.
289 * @mask - register bitmask.
290 * @val - value to write.
291 *
292 */
293static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
294 const u32 mask, u32 val)
295{
296 u32 shift = find_first_bit((void *)&mask, 32);
297 u32 tmp = ioread32(base + offset);
298
299 tmp &= ~mask; /* clear written bits */
300 val = tmp | (val << shift);
301 iowrite32(val, base + offset);
302}
303
304/**
305 * Write register and read back masked value to confirm it is written
306 *
307 * @base - DWC3 base virtual address.
308 * @offset - register offset.
309 * @mask - register bitmask specifying what should be updated
310 * @val - value to write.
311 *
312 */
313static inline void dwc3_msm_write_readback(void *base, u32 offset,
314 const u32 mask, u32 val)
315{
316 u32 write_val, tmp = ioread32(base + offset);
317
318 tmp &= ~mask; /* retain other bits */
319 write_val = tmp | val;
320
321 iowrite32(write_val, base + offset);
322
323 /* Read back to see if val was written */
324 tmp = ioread32(base + offset);
325 tmp &= mask; /* clear other bits */
326
327 if (tmp != val)
328 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
329 __func__, val, offset);
330}
331
332static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
333{
334 int i, num_ports;
335 u32 reg;
336
337 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
338 num_ports = HCS_MAX_PORTS(reg);
339
340 for (i = 0; i < num_ports; i++) {
341 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
342 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
343 return true;
344 }
345
346 return false;
347}
348
349static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
350{
351 u8 speed;
352
353 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
354 return !!(speed & DSTS_CONNECTSPD_SS);
355}
356
357static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
358{
359 if (mdwc->in_host_mode)
360 return dwc3_msm_is_host_superspeed(mdwc);
361
362 return dwc3_msm_is_dev_superspeed(mdwc);
363}
364
365#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
366/**
367 * Configure the DBM with the BAM's data fifo.
368 * This function is called by the USB BAM Driver
369 * upon initialization.
370 *
371 * @ep - pointer to usb endpoint.
372 * @addr - address of data fifo.
373 * @size - size of data fifo.
374 *
375 */
376int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
377 u32 size, u8 dst_pipe_idx)
378{
379 struct dwc3_ep *dep = to_dwc3_ep(ep);
380 struct dwc3 *dwc = dep->dwc;
381 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
382
383 dev_dbg(mdwc->dev, "%s\n", __func__);
384
385 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
386 dst_pipe_idx);
387}
388
389
390/**
391* Cleanups for msm endpoint on request complete.
392*
393* Also call original request complete.
394*
395* @usb_ep - pointer to usb_ep instance.
396* @request - pointer to usb_request instance.
397*
398* @return int - 0 on success, negative on error.
399*/
400static void dwc3_msm_req_complete_func(struct usb_ep *ep,
401 struct usb_request *request)
402{
403 struct dwc3_ep *dep = to_dwc3_ep(ep);
404 struct dwc3 *dwc = dep->dwc;
405 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
406 struct dwc3_msm_req_complete *req_complete = NULL;
407
408 /* Find original request complete function and remove it from list */
409 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
410 if (req_complete->req == request)
411 break;
412 }
413 if (!req_complete || req_complete->req != request) {
414 dev_err(dep->dwc->dev, "%s: could not find the request\n",
415 __func__);
416 return;
417 }
418 list_del(&req_complete->list_item);
419
420 /*
421 * Release another one TRB to the pool since DBM queue took 2 TRBs
422 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
423 * released only one.
424 */
425 dep->busy_slot++;
426
427 /* Unconfigure dbm ep */
428 dbm_ep_unconfig(mdwc->dbm, dep->number);
429
430 /*
431 * If this is the last endpoint we unconfigured, than reset also
432 * the event buffers; unless unconfiguring the ep due to lpm,
433 * in which case the event buffer only gets reset during the
434 * block reset.
435 */
436 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
437 !dbm_reset_ep_after_lpm(mdwc->dbm))
438 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
439
440 /*
441 * Call original complete function, notice that dwc->lock is already
442 * taken by the caller of this function (dwc3_gadget_giveback()).
443 */
444 request->complete = req_complete->orig_complete;
445 if (request->complete)
446 request->complete(ep, request);
447
448 kfree(req_complete);
449}
450
451
452/**
453* Helper function
454*
455* Reset DBM endpoint.
456*
457* @mdwc - pointer to dwc3_msm instance.
458* @dep - pointer to dwc3_ep instance.
459*
460* @return int - 0 on success, negative on error.
461*/
462static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
463{
464 int ret;
465
466 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
467
468 /* Reset the dbm endpoint */
469 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
470 if (ret) {
471 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
472 __func__);
473 return ret;
474 }
475
476 /*
477 * The necessary delay between asserting and deasserting the dbm ep
478 * reset is based on the number of active endpoints. If there is more
479 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
480 * delay will suffice.
481 */
482 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
483 usleep_range(1000, 1200);
484 else
485 udelay(10);
486 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
487 if (ret) {
488 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
489 __func__);
490 return ret;
491 }
492
493 return 0;
494}
495
496/**
497* Reset the DBM endpoint which is linked to the given USB endpoint.
498*
499* @usb_ep - pointer to usb_ep instance.
500*
501* @return int - 0 on success, negative on error.
502*/
503
504int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
505{
506 struct dwc3_ep *dep = to_dwc3_ep(ep);
507 struct dwc3 *dwc = dep->dwc;
508 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
509
510 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
511}
512EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
513
514
515/**
516* Helper function.
517* See the header of the dwc3_msm_ep_queue function.
518*
519* @dwc3_ep - pointer to dwc3_ep instance.
520* @req - pointer to dwc3_request instance.
521*
522* @return int - 0 on success, negative on error.
523*/
524static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
525{
526 struct dwc3_trb *trb;
527 struct dwc3_trb *trb_link;
528 struct dwc3_gadget_ep_cmd_params params;
529 u32 cmd;
530 int ret = 0;
531
532 /* We push the request to the dep->req_queued list to indicate that
533 * this request is issued with start transfer. The request will be out
534 * from this list in 2 cases. The first is that the transfer will be
535 * completed (not if the transfer is endless using a circular TRBs with
536 * with link TRB). The second case is an option to do stop stransfer,
537 * this can be initiated by the function driver when calling dequeue.
538 */
539 req->queued = true;
540 list_add_tail(&req->list, &dep->req_queued);
541
542 /* First, prepare a normal TRB, point to the fake buffer */
543 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
544 dep->free_slot++;
545 memset(trb, 0, sizeof(*trb));
546
547 req->trb = trb;
548 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
549 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
550 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
551 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
552 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
553
554 /* Second, prepare a Link TRB that points to the first TRB*/
555 trb_link = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
556 dep->free_slot++;
557 memset(trb_link, 0, sizeof(*trb_link));
558
559 trb_link->bpl = lower_32_bits(req->trb_dma);
560 trb_link->bph = DBM_TRB_BIT |
561 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
562 trb_link->size = 0;
563 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
564
565 /*
566 * Now start the transfer
567 */
568 memset(&params, 0, sizeof(params));
569 params.param0 = 0; /* TDAddr High */
570 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
571
572 /* DBM requires IOC to be set */
573 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
574 ret = dwc3_send_gadget_ep_cmd(dep->dwc, dep->number, cmd, &params);
575 if (ret < 0) {
576 dev_dbg(dep->dwc->dev,
577 "%s: failed to send STARTTRANSFER command\n",
578 __func__);
579
580 list_del(&req->list);
581 return ret;
582 }
583 dep->flags |= DWC3_EP_BUSY;
584 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep->dwc,
585 dep->number);
586
587 return ret;
588}
589
590/**
591* Queue a usb request to the DBM endpoint.
592* This function should be called after the endpoint
593* was enabled by the ep_enable.
594*
595* This function prepares special structure of TRBs which
596* is familiar with the DBM HW, so it will possible to use
597* this endpoint in DBM mode.
598*
599* The TRBs prepared by this function, is one normal TRB
600* which point to a fake buffer, followed by a link TRB
601* that points to the first TRB.
602*
603* The API of this function follow the regular API of
604* usb_ep_queue (see usb_ep_ops in include/linuk/usb/gadget.h).
605*
606* @usb_ep - pointer to usb_ep instance.
607* @request - pointer to usb_request instance.
608* @gfp_flags - possible flags.
609*
610* @return int - 0 on success, negative on error.
611*/
612static int dwc3_msm_ep_queue(struct usb_ep *ep,
613 struct usb_request *request, gfp_t gfp_flags)
614{
615 struct dwc3_request *req = to_dwc3_request(request);
616 struct dwc3_ep *dep = to_dwc3_ep(ep);
617 struct dwc3 *dwc = dep->dwc;
618 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
619 struct dwc3_msm_req_complete *req_complete;
620 unsigned long flags;
621 int ret = 0, size;
622 u8 bam_pipe;
623 bool producer;
624 bool disable_wb;
625 bool internal_mem;
626 bool ioc;
627 bool superspeed;
628
629 if (!(request->udc_priv & MSM_SPS_MODE)) {
630 /* Not SPS mode, call original queue */
631 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
632 __func__);
633
634 return (mdwc->original_ep_ops[dep->number])->queue(ep,
635 request,
636 gfp_flags);
637 }
638
639 /* HW restriction regarding TRB size (8KB) */
640 if (req->request.length < 0x2000) {
641 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
642 return -EINVAL;
643 }
644
645 /*
646 * Override req->complete function, but before doing that,
647 * store it's original pointer in the req_complete_list.
648 */
649 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
650 if (!req_complete)
651 return -ENOMEM;
652
653 req_complete->req = request;
654 req_complete->orig_complete = request->complete;
655 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
656 request->complete = dwc3_msm_req_complete_func;
657
658 /*
659 * Configure the DBM endpoint
660 */
661 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
662 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
663 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
664 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
665 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
666
667 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
668 disable_wb, internal_mem, ioc);
669 if (ret < 0) {
670 dev_err(mdwc->dev,
671 "error %d after calling dbm_ep_config\n", ret);
672 return ret;
673 }
674
675 dev_vdbg(dwc->dev, "%s: queing request %p to ep %s length %d\n",
676 __func__, request, ep->name, request->length);
677 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
678 dbm_event_buffer_config(mdwc->dbm,
679 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
680 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
681 DWC3_GEVNTSIZ_SIZE(size));
682
683 /*
684 * We must obtain the lock of the dwc3 core driver,
685 * including disabling interrupts, so we will be sure
686 * that we are the only ones that configure the HW device
687 * core and ensure that we queuing the request will finish
688 * as soon as possible so we will release back the lock.
689 */
690 spin_lock_irqsave(&dwc->lock, flags);
691 if (!dep->endpoint.desc) {
692 dev_err(mdwc->dev,
693 "%s: trying to queue request %p to disabled ep %s\n",
694 __func__, request, ep->name);
695 ret = -EPERM;
696 goto err;
697 }
698
699 if (dep->number == 0 || dep->number == 1) {
700 dev_err(mdwc->dev,
701 "%s: trying to queue dbm request %p to control ep %s\n",
702 __func__, request, ep->name);
703 ret = -EPERM;
704 goto err;
705 }
706
707
708 if (dep->busy_slot != dep->free_slot || !list_empty(&dep->request_list)
709 || !list_empty(&dep->req_queued)) {
710 dev_err(mdwc->dev,
711 "%s: trying to queue dbm request %p tp ep %s\n",
712 __func__, request, ep->name);
713 ret = -EPERM;
714 goto err;
715 } else {
716 dep->busy_slot = 0;
717 dep->free_slot = 0;
718 }
719
720 ret = __dwc3_msm_ep_queue(dep, req);
721 if (ret < 0) {
722 dev_err(mdwc->dev,
723 "error %d after calling __dwc3_msm_ep_queue\n", ret);
724 goto err;
725 }
726
727 spin_unlock_irqrestore(&dwc->lock, flags);
728 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
729 dbm_set_speed(mdwc->dbm, (u8)superspeed);
730
731 return 0;
732
733err:
734 spin_unlock_irqrestore(&dwc->lock, flags);
735 kfree(req_complete);
736 return ret;
737}
738
739/*
740* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
741*
742* @usb_ep - pointer to usb_ep instance.
743*
744* @return int - XferRscIndex
745*/
746static inline int gsi_get_xfer_index(struct usb_ep *ep)
747{
748 struct dwc3_ep *dep = to_dwc3_ep(ep);
749
750 return dep->resource_index;
751}
752
753/*
754* Fills up the GSI channel information needed in call to IPA driver
755* for GSI channel creation.
756*
757* @usb_ep - pointer to usb_ep instance.
758* @ch_info - output parameter with requested channel info
759*/
760static void gsi_get_channel_info(struct usb_ep *ep,
761 struct gsi_channel_info *ch_info)
762{
763 struct dwc3_ep *dep = to_dwc3_ep(ep);
764 int last_trb_index = 0;
765 struct dwc3 *dwc = dep->dwc;
766 struct usb_gsi_request *request = ch_info->ch_req;
767
768 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
769 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
770 DWC3_DEPCMD(dep->number));
771 ch_info->depcmd_hi_addr = 0;
772
773 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
774 &dep->trb_pool[0]);
775 /* Convert to multipled of 1KB */
776 ch_info->const_buffer_size = request->buf_len/1024;
777
778 /* IN direction */
779 if (dep->direction) {
780 /*
781 * Multiply by size of each TRB for xfer_ring_len in bytes.
782 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
783 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
784 */
785 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
786 last_trb_index = 2 * request->num_bufs + 2;
787 } else { /* OUT direction */
788 /*
789 * Multiply by size of each TRB for xfer_ring_len in bytes.
790 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
791 * LINK TRB.
792 */
793 ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
794 last_trb_index = request->num_bufs + 1;
795 }
796
797 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
798 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
799 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
800 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
801 DWC3_GEVNTCOUNT(ep->ep_intr_num));
802 ch_info->gevntcount_hi_addr = 0;
803
804 dev_dbg(dwc->dev,
805 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
806 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
807 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
808}
809
810/*
811* Perform StartXfer on GSI EP. Stores XferRscIndex.
812*
813* @usb_ep - pointer to usb_ep instance.
814*
815* @return int - 0 on success
816*/
817static int gsi_startxfer_for_ep(struct usb_ep *ep)
818{
819 int ret;
820 struct dwc3_gadget_ep_cmd_params params;
821 u32 cmd;
822 struct dwc3_ep *dep = to_dwc3_ep(ep);
823 struct dwc3 *dwc = dep->dwc;
824
825 memset(&params, 0, sizeof(params));
826 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
827 params.param0 |= (ep->ep_intr_num << 16);
828 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
829 &dep->trb_pool[0]));
830 cmd = DWC3_DEPCMD_STARTTRANSFER;
831 cmd |= DWC3_DEPCMD_PARAM(0);
832 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
833
834 if (ret < 0)
835 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
836 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
837 dep->number);
838 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
839 return ret;
840}
841
842/*
843* Store Ring Base and Doorbell Address for GSI EP
844* for GSI channel creation.
845*
846* @usb_ep - pointer to usb_ep instance.
847* @dbl_addr - Doorbell address obtained from IPA driver
848*/
849static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
850{
851 struct dwc3_ep *dep = to_dwc3_ep(ep);
852 struct dwc3 *dwc = dep->dwc;
853 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
854 int n = ep->ep_intr_num - 1;
855
856 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
857 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
858 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
859
860 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
861 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
862 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
863 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
864}
865
866/*
867* Rings Doorbell for IN GSI Channel
868*
869* @usb_ep - pointer to usb_ep instance.
870* @request - pointer to GSI request. This is used to pass in the
871* address of the GSI doorbell obtained from IPA driver
872*/
873static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
874{
875 void __iomem *gsi_dbl_address_lsb;
876 void __iomem *gsi_dbl_address_msb;
877 dma_addr_t offset;
878 u64 dbl_addr = *((u64 *)request->buf_base_addr);
879 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
880 u32 dbl_hi_addr = (dbl_addr >> 32);
881 u32 num_trbs = (request->num_bufs * 2 + 2);
882 struct dwc3_ep *dep = to_dwc3_ep(ep);
883 struct dwc3 *dwc = dep->dwc;
884 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
885
886 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
887 dbl_lo_addr, sizeof(u32));
888 if (!gsi_dbl_address_lsb)
889 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
890
891 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
892 dbl_hi_addr, sizeof(u32));
893 if (!gsi_dbl_address_msb)
894 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
895
896 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
897 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
898 &offset, gsi_dbl_address_lsb, dbl_lo_addr);
899
900 writel_relaxed(offset, gsi_dbl_address_lsb);
901 writel_relaxed(0, gsi_dbl_address_msb);
902}
903
904/*
905* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
906*
907* @usb_ep - pointer to usb_ep instance.
908* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
909*
910* @return int - 0 on success
911*/
912static int gsi_updatexfer_for_ep(struct usb_ep *ep,
913 struct usb_gsi_request *request)
914{
915 int i;
916 int ret;
917 u32 cmd;
918 int num_trbs = request->num_bufs + 1;
919 struct dwc3_trb *trb;
920 struct dwc3_gadget_ep_cmd_params params;
921 struct dwc3_ep *dep = to_dwc3_ep(ep);
922 struct dwc3 *dwc = dep->dwc;
923
924 for (i = 0; i < num_trbs - 1; i++) {
925 trb = &dep->trb_pool[i];
926 trb->ctrl |= DWC3_TRB_CTRL_HWO;
927 }
928
929 memset(&params, 0, sizeof(params));
930 cmd = DWC3_DEPCMD_UPDATETRANSFER;
931 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
932 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
933 dep->flags |= DWC3_EP_BUSY;
934 if (ret < 0)
935 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
936 return ret;
937}
938
939/*
940* Perform EndXfer on particular GSI EP.
941*
942* @usb_ep - pointer to usb_ep instance.
943*/
944static void gsi_endxfer_for_ep(struct usb_ep *ep)
945{
946 struct dwc3_ep *dep = to_dwc3_ep(ep);
947 struct dwc3 *dwc = dep->dwc;
948
949 dwc3_stop_active_transfer(dwc, dep->number, true);
950}
951
952/*
953* Allocates and configures TRBs for GSI EPs.
954*
955* @usb_ep - pointer to usb_ep instance.
956* @request - pointer to GSI request.
957*
958* @return int - 0 on success
959*/
960static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
961{
962 int i = 0;
963 dma_addr_t buffer_addr = req->dma;
964 struct dwc3_ep *dep = to_dwc3_ep(ep);
965 struct dwc3 *dwc = dep->dwc;
966 struct dwc3_trb *trb;
967 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
968 : (req->num_bufs + 1);
969
970 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
971 num_trbs * sizeof(struct dwc3_trb),
972 num_trbs * sizeof(struct dwc3_trb), 0);
973 if (!dep->trb_dma_pool) {
974 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
975 dep->name);
976 return -ENOMEM;
977 }
978
979 dep->num_trbs = num_trbs;
980
981 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
982 GFP_KERNEL, &dep->trb_pool_dma);
983 if (!dep->trb_pool) {
984 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
985 dep->name);
986 return -ENOMEM;
987 }
988
989 /* IN direction */
990 if (dep->direction) {
991 for (i = 0; i < num_trbs ; i++) {
992 trb = &dep->trb_pool[i];
993 memset(trb, 0, sizeof(*trb));
994 /* Set up first n+1 TRBs for ZLPs */
995 if (i < (req->num_bufs + 1)) {
996 trb->bpl = 0;
997 trb->bph = 0;
998 trb->size = 0;
999 trb->ctrl = DWC3_TRBCTL_NORMAL
1000 | DWC3_TRB_CTRL_IOC;
1001 continue;
1002 }
1003
1004 /* Setup n TRBs pointing to valid buffers */
1005 trb->bpl = lower_32_bits(buffer_addr);
1006 trb->bph = 0;
1007 trb->size = 0;
1008 trb->ctrl = DWC3_TRBCTL_NORMAL
1009 | DWC3_TRB_CTRL_IOC;
1010 buffer_addr += req->buf_len;
1011
1012 /* Set up the Link TRB at the end */
1013 if (i == (num_trbs - 1)) {
1014 trb->bpl = dwc3_trb_dma_offset(dep,
1015 &dep->trb_pool[0]);
1016 trb->bph = (1 << 23) | (1 << 21)
1017 | (ep->ep_intr_num << 16);
1018 trb->size = 0;
1019 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1020 | DWC3_TRB_CTRL_HWO;
1021 }
1022 }
1023 } else { /* OUT direction */
1024
1025 for (i = 0; i < num_trbs ; i++) {
1026
1027 trb = &dep->trb_pool[i];
1028 memset(trb, 0, sizeof(*trb));
1029 trb->bpl = lower_32_bits(buffer_addr);
1030 trb->bph = 0;
1031 trb->size = req->buf_len;
1032 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
1033 | DWC3_TRB_CTRL_CSP
1034 | DWC3_TRB_CTRL_ISP_IMI;
1035 buffer_addr += req->buf_len;
1036
1037 /* Set up the Link TRB at the end */
1038 if (i == (num_trbs - 1)) {
1039 trb->bpl = dwc3_trb_dma_offset(dep,
1040 &dep->trb_pool[0]);
1041 trb->bph = (1 << 23) | (1 << 21)
1042 | (ep->ep_intr_num << 16);
1043 trb->size = 0;
1044 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1045 | DWC3_TRB_CTRL_HWO;
1046 }
1047 }
1048 }
1049 return 0;
1050}
1051
1052/*
1053* Frees TRBs for GSI EPs.
1054*
1055* @usb_ep - pointer to usb_ep instance.
1056*
1057*/
1058static void gsi_free_trbs(struct usb_ep *ep)
1059{
1060 struct dwc3_ep *dep = to_dwc3_ep(ep);
1061
1062 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1063 return;
1064
1065 /* Free TRBs and TRB pool for EP */
1066 if (dep->trb_dma_pool) {
1067 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1068 dep->trb_pool_dma);
1069 dma_pool_destroy(dep->trb_dma_pool);
1070 dep->trb_pool = NULL;
1071 dep->trb_pool_dma = 0;
1072 dep->trb_dma_pool = NULL;
1073 }
1074}
1075/*
1076* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1077*
1078* @usb_ep - pointer to usb_ep instance.
1079* @request - pointer to GSI request.
1080*/
1081static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
1082{
1083 struct dwc3_ep *dep = to_dwc3_ep(ep);
1084 struct dwc3 *dwc = dep->dwc;
1085 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1086 struct dwc3_gadget_ep_cmd_params params;
1087 const struct usb_endpoint_descriptor *desc = ep->desc;
1088 const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
1089 u32 reg;
1090
1091 memset(&params, 0x00, sizeof(params));
1092
1093 /* Configure GSI EP */
1094 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
1095 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
1096
1097 /* Burst size is only needed in SuperSpeed mode */
1098 if (dwc->gadget.speed == USB_SPEED_SUPER) {
1099 u32 burst = dep->endpoint.maxburst - 1;
1100
1101 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
1102 }
1103
1104 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
1105 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
1106 | DWC3_DEPCFG_STREAM_EVENT_EN;
1107 dep->stream_capable = true;
1108 }
1109
1110 /* Set EP number */
1111 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
1112
1113 /* Set interrupter number for GSI endpoints */
1114 params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
1115
1116 /* Enable XferInProgress and XferComplete Interrupts */
1117 params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
1118 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
1119 params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
1120 /*
1121 * We must use the lower 16 TX FIFOs even though
1122 * HW might have more
1123 */
1124 /* Remove FIFO Number for GSI EP*/
1125 if (dep->direction)
1126 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
1127
1128 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
1129
1130 dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
1131 params.param0, params.param1, params.param2, dep->name);
1132
1133 dwc3_send_gadget_ep_cmd(dwc, dep->number,
1134 DWC3_DEPCMD_SETEPCONFIG, &params);
1135
1136 /* Set XferRsc Index for GSI EP */
1137 if (!(dep->flags & DWC3_EP_ENABLED)) {
1138 memset(&params, 0x00, sizeof(params));
1139 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
1140 dwc3_send_gadget_ep_cmd(dwc, dep->number,
1141 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
1142
1143 dep->endpoint.desc = desc;
1144 dep->comp_desc = comp_desc;
1145 dep->type = usb_endpoint_type(desc);
1146 dep->flags |= DWC3_EP_ENABLED;
1147 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1148 reg |= DWC3_DALEPENA_EP(dep->number);
1149 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1150 }
1151
1152}
1153
1154/*
1155* Enables USB wrapper for GSI
1156*
1157* @usb_ep - pointer to usb_ep instance.
1158*/
1159static void gsi_enable(struct usb_ep *ep)
1160{
1161 struct dwc3_ep *dep = to_dwc3_ep(ep);
1162 struct dwc3 *dwc = dep->dwc;
1163 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1164
1165 dwc3_msm_write_reg_field(mdwc->base,
1166 GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
1167 dwc3_msm_write_reg_field(mdwc->base,
1168 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
1169 dwc3_msm_write_reg_field(mdwc->base,
1170 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
1171 dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
1172 dwc3_msm_write_reg_field(mdwc->base,
1173 GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
1174}
1175
1176/*
1177* Block or allow doorbell towards GSI
1178*
1179* @usb_ep - pointer to usb_ep instance.
1180* @request - pointer to GSI request. In this case num_bufs is used as a bool
1181* to set or clear the doorbell bit
1182*/
1183static void gsi_set_clear_dbell(struct usb_ep *ep,
1184 bool block_db)
1185{
1186
1187 struct dwc3_ep *dep = to_dwc3_ep(ep);
1188 struct dwc3 *dwc = dep->dwc;
1189 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1190
1191 dwc3_msm_write_reg_field(mdwc->base,
1192 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1193}
1194
1195/*
1196* Performs necessary checks before stopping GSI channels
1197*
1198* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1199*/
1200static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
1201{
1202 u32 timeout = 1500;
1203 u32 reg = 0;
1204 struct dwc3_ep *dep = to_dwc3_ep(ep);
1205 struct dwc3 *dwc = dep->dwc;
1206 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1207
1208 while (dwc3_msm_read_reg_field(mdwc->base,
1209 GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
1210 if (!timeout--) {
1211 dev_err(mdwc->dev,
1212 "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
1213 return false;
1214 }
1215 }
1216 /* Check for U3 only if we are not handling Function Suspend */
1217 if (!f_suspend) {
1218 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1219 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
1220 dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
1221 return false;
1222 }
1223 }
1224
1225 return true;
1226}
1227
1228
1229/**
1230* Performs GSI operations or GSI EP related operations.
1231*
1232* @usb_ep - pointer to usb_ep instance.
1233* @op_data - pointer to opcode related data.
1234* @op - GSI related or GSI EP related op code.
1235*
1236* @return int - 0 on success, negative on error.
1237* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1238*/
1239static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1240 void *op_data, enum gsi_ep_op op)
1241{
1242 u32 ret = 0;
1243 struct dwc3_ep *dep = to_dwc3_ep(ep);
1244 struct dwc3 *dwc = dep->dwc;
1245 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1246 struct usb_gsi_request *request;
1247 struct gsi_channel_info *ch_info;
1248 bool block_db, f_suspend;
1249
1250 switch (op) {
1251 case GSI_EP_OP_PREPARE_TRBS:
1252 request = (struct usb_gsi_request *)op_data;
1253 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1254 ret = gsi_prepare_trbs(ep, request);
1255 break;
1256 case GSI_EP_OP_FREE_TRBS:
1257 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1258 gsi_free_trbs(ep);
1259 break;
1260 case GSI_EP_OP_CONFIG:
1261 request = (struct usb_gsi_request *)op_data;
1262 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
1263 gsi_configure_ep(ep, request);
1264 break;
1265 case GSI_EP_OP_STARTXFER:
1266 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
1267 ret = gsi_startxfer_for_ep(ep);
1268 break;
1269 case GSI_EP_OP_GET_XFER_IDX:
1270 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1271 ret = gsi_get_xfer_index(ep);
1272 break;
1273 case GSI_EP_OP_STORE_DBL_INFO:
1274 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1275 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1276 break;
1277 case GSI_EP_OP_ENABLE_GSI:
1278 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1279 gsi_enable(ep);
1280 break;
1281 case GSI_EP_OP_GET_CH_INFO:
1282 ch_info = (struct gsi_channel_info *)op_data;
1283 gsi_get_channel_info(ep, ch_info);
1284 break;
1285 case GSI_EP_OP_RING_IN_DB:
1286 request = (struct usb_gsi_request *)op_data;
1287 dev_dbg(mdwc->dev, "RING IN EP DB\n");
1288 gsi_ring_in_db(ep, request);
1289 break;
1290 case GSI_EP_OP_UPDATEXFER:
1291 request = (struct usb_gsi_request *)op_data;
1292 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
1293 ret = gsi_updatexfer_for_ep(ep, request);
1294 break;
1295 case GSI_EP_OP_ENDXFER:
1296 request = (struct usb_gsi_request *)op_data;
1297 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
1298 gsi_endxfer_for_ep(ep);
1299 break;
1300 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1301 block_db = *((bool *)op_data);
1302 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1303 block_db);
1304 gsi_set_clear_dbell(ep, block_db);
1305 break;
1306 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1307 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1308 f_suspend = *((bool *)op_data);
1309 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1310 break;
1311 case GSI_EP_OP_DISABLE:
1312 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1313 ret = ep->ops->disable(ep);
1314 break;
1315 default:
1316 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1317 }
1318
1319 return ret;
1320}
1321
1322/**
1323 * Configure MSM endpoint.
1324 * This function do specific configurations
1325 * to an endpoint which need specific implementaion
1326 * in the MSM architecture.
1327 *
1328 * This function should be called by usb function/class
1329 * layer which need a support from the specific MSM HW
1330 * which wrap the USB3 core. (like GSI or DBM specific endpoints)
1331 *
1332 * @ep - a pointer to some usb_ep instance
1333 *
1334 * @return int - 0 on success, negetive on error.
1335 */
1336int msm_ep_config(struct usb_ep *ep)
1337{
1338 struct dwc3_ep *dep = to_dwc3_ep(ep);
1339 struct dwc3 *dwc = dep->dwc;
1340 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1341 struct usb_ep_ops *new_ep_ops;
1342
1343
1344 /* Save original ep ops for future restore*/
1345 if (mdwc->original_ep_ops[dep->number]) {
1346 dev_err(mdwc->dev,
1347 "ep [%s,%d] already configured as msm endpoint\n",
1348 ep->name, dep->number);
1349 return -EPERM;
1350 }
1351 mdwc->original_ep_ops[dep->number] = ep->ops;
1352
1353 /* Set new usb ops as we like */
1354 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1355 if (!new_ep_ops)
1356 return -ENOMEM;
1357
1358 (*new_ep_ops) = (*ep->ops);
1359 new_ep_ops->queue = dwc3_msm_ep_queue;
1360 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1361 ep->ops = new_ep_ops;
1362
1363 /*
1364 * Do HERE more usb endpoint configurations
1365 * which are specific to MSM.
1366 */
1367
1368 return 0;
1369}
1370EXPORT_SYMBOL(msm_ep_config);
1371
1372/**
1373 * Un-configure MSM endpoint.
1374 * Tear down configurations done in the
1375 * dwc3_msm_ep_config function.
1376 *
1377 * @ep - a pointer to some usb_ep instance
1378 *
1379 * @return int - 0 on success, negative on error.
1380 */
1381int msm_ep_unconfig(struct usb_ep *ep)
1382{
1383 struct dwc3_ep *dep = to_dwc3_ep(ep);
1384 struct dwc3 *dwc = dep->dwc;
1385 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1386 struct usb_ep_ops *old_ep_ops;
1387
1388 /* Restore original ep ops */
1389 if (!mdwc->original_ep_ops[dep->number]) {
1390 dev_err(mdwc->dev,
1391 "ep [%s,%d] was not configured as msm endpoint\n",
1392 ep->name, dep->number);
1393 return -EINVAL;
1394 }
1395 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1396 ep->ops = mdwc->original_ep_ops[dep->number];
1397 mdwc->original_ep_ops[dep->number] = NULL;
1398 kfree(old_ep_ops);
1399
1400 /*
1401 * Do HERE more usb endpoint un-configurations
1402 * which are specific to MSM.
1403 */
1404
1405 return 0;
1406}
1407EXPORT_SYMBOL(msm_ep_unconfig);
1408#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1409
1410static void dwc3_resume_work(struct work_struct *w);
1411
1412static void dwc3_restart_usb_work(struct work_struct *w)
1413{
1414 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1415 restart_usb_work);
1416 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1417 unsigned int timeout = 50;
1418
1419 dev_dbg(mdwc->dev, "%s\n", __func__);
1420
1421 if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
1422 dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
1423 return;
1424 }
1425
1426 /* guard against concurrent VBUS handling */
1427 mdwc->in_restart = true;
1428
1429 if (!mdwc->vbus_active) {
1430 dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
1431 dwc->err_evt_seen = false;
1432 mdwc->in_restart = false;
1433 return;
1434 }
1435
1436 dbg_event(0xFF, "RestartUSB", 0);
1437
1438 /* Reset active USB connection */
1439 dwc3_resume_work(&mdwc->resume_work);
1440
1441 /* Make sure disconnect is processed before sending connect */
1442 while (--timeout && !pm_runtime_suspended(mdwc->dev))
1443 msleep(20);
1444
1445 if (!timeout) {
1446 dev_dbg(mdwc->dev,
1447 "Not in LPM after disconnect, forcing suspend...\n");
1448 dbg_event(0xFF, "ReStart:RT SUSP",
1449 atomic_read(&mdwc->dev->power.usage_count));
1450 pm_runtime_suspend(mdwc->dev);
1451 }
1452
1453 /* Force reconnect only if cable is still connected */
1454 if (mdwc->vbus_active) {
1455 mdwc->in_restart = false;
1456 dwc3_resume_work(&mdwc->resume_work);
1457 }
1458
1459 dwc->err_evt_seen = false;
1460 flush_delayed_work(&mdwc->sm_work);
1461}
1462
1463/*
1464 * Check whether the DWC3 requires resetting the ep
1465 * after going to Low Power Mode (lpm)
1466 */
1467bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1468{
1469 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1470 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1471
1472 return dbm_reset_ep_after_lpm(mdwc->dbm);
1473}
1474EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1475
1476/*
1477 * Config Global Distributed Switch Controller (GDSC)
1478 * to support controller power collapse
1479 */
1480static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1481{
1482 int ret;
1483
1484 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1485 return -EPERM;
1486
1487 if (on) {
1488 ret = regulator_enable(mdwc->dwc3_gdsc);
1489 if (ret) {
1490 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1491 return ret;
1492 }
1493 } else {
1494 ret = regulator_disable(mdwc->dwc3_gdsc);
1495 if (ret) {
1496 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1497 return ret;
1498 }
1499 }
1500
1501 return ret;
1502}
1503
1504static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
1505{
1506 int ret = 0;
1507
1508 if (assert) {
1509 disable_irq(mdwc->pwr_event_irq);
1510 /* Using asynchronous block reset to the hardware */
1511 dev_dbg(mdwc->dev, "block_reset ASSERT\n");
1512 clk_disable_unprepare(mdwc->utmi_clk);
1513 clk_disable_unprepare(mdwc->sleep_clk);
1514 clk_disable_unprepare(mdwc->core_clk);
1515 clk_disable_unprepare(mdwc->iface_clk);
1516 ret = clk_reset(mdwc->core_clk, CLK_RESET_ASSERT);
1517 if (ret)
1518 dev_err(mdwc->dev, "dwc3 core_clk assert failed\n");
1519 } else {
1520 dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
1521 ret = clk_reset(mdwc->core_clk, CLK_RESET_DEASSERT);
1522 ndelay(200);
1523 clk_prepare_enable(mdwc->iface_clk);
1524 clk_prepare_enable(mdwc->core_clk);
1525 clk_prepare_enable(mdwc->sleep_clk);
1526 clk_prepare_enable(mdwc->utmi_clk);
1527 if (ret)
1528 dev_err(mdwc->dev, "dwc3 core_clk deassert failed\n");
1529 enable_irq(mdwc->pwr_event_irq);
1530 }
1531
1532 return ret;
1533}
1534
1535static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
1536{
1537 u32 guctl, gfladj = 0;
1538
1539 guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
1540 guctl &= ~DWC3_GUCTL_REFCLKPER;
1541
1542 /* GFLADJ register is used starting with revision 2.50a */
1543 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
1544 gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
1545 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1546 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
1547 gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
1548 gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
1549 }
1550
1551 /* Refer to SNPS Databook Table 6-55 for calculations used */
1552 switch (mdwc->utmi_clk_rate) {
1553 case 19200000:
1554 guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
1555 gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1556 gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1557 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1558 gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1559 break;
1560 case 24000000:
1561 guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
1562 gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1563 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1564 gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1565 break;
1566 default:
1567 dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
1568 mdwc->utmi_clk_rate);
1569 break;
1570 }
1571
1572 dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
1573 if (gfladj)
1574 dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
1575}
1576
1577/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
1578static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
1579{
1580 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
1581 /* On older cores set XHCI_REV bit to specify revision 1.0 */
1582 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1583 BIT(2), 1);
1584
1585 /*
1586 * Enable master clock for RAMs to allow BAM to access RAMs when
1587 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
1588 * are seen where RAM clocks get turned OFF in SS mode
1589 */
1590 dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
1591 dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
1592
1593}
1594
1595static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1596{
1597 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1598 u32 reg;
1599
1600 if (dwc->revision < DWC3_REVISION_230A)
1601 return;
1602
1603 switch (event) {
1604 case DWC3_CONTROLLER_ERROR_EVENT:
1605 dev_info(mdwc->dev,
1606 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1607 dwc->irq_cnt);
1608
1609 dwc3_gadget_disable_irq(dwc);
1610
1611 /* prevent core from generating interrupts until recovery */
1612 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1613 reg |= DWC3_GCTL_CORESOFTRESET;
1614 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1615
1616 /* restart USB which performs full reset and reconnect */
1617 schedule_work(&mdwc->restart_usb_work);
1618 break;
1619 case DWC3_CONTROLLER_RESET_EVENT:
1620 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1621 /* HS & SSPHYs get reset as part of core soft reset */
1622 dwc3_msm_qscratch_reg_init(mdwc);
1623 break;
1624 case DWC3_CONTROLLER_POST_RESET_EVENT:
1625 dev_dbg(mdwc->dev,
1626 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1627
1628 /*
1629 * Below sequence is used when controller is working without
1630 * having ssphy and only USB high speed is supported.
1631 */
1632 if (dwc->maximum_speed == USB_SPEED_HIGH) {
1633 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1634 dwc3_msm_read_reg(mdwc->base,
1635 QSCRATCH_GENERAL_CFG)
1636 | PIPE_UTMI_CLK_DIS);
1637
1638 usleep_range(2, 5);
1639
1640
1641 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1642 dwc3_msm_read_reg(mdwc->base,
1643 QSCRATCH_GENERAL_CFG)
1644 | PIPE_UTMI_CLK_SEL
1645 | PIPE3_PHYSTATUS_SW);
1646
1647 usleep_range(2, 5);
1648
1649 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1650 dwc3_msm_read_reg(mdwc->base,
1651 QSCRATCH_GENERAL_CFG)
1652 & ~PIPE_UTMI_CLK_DIS);
1653 }
1654
1655 dwc3_msm_update_ref_clk(mdwc);
1656 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1657 break;
1658 case DWC3_CONTROLLER_CONNDONE_EVENT:
1659 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1660 /*
1661 * Add power event if the dbm indicates coming out of L1 by
1662 * interrupt
1663 */
1664 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1665 dwc3_msm_write_reg_field(mdwc->base,
1666 PWR_EVNT_IRQ_MASK_REG,
1667 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1668
1669 atomic_set(&dwc->in_lpm, 0);
1670 break;
1671 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1672 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1673 if (dwc->enable_bus_suspend) {
1674 mdwc->suspend = dwc->b_suspend;
1675 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1676 }
1677 break;
1678 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1679 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
1680 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1681 break;
1682 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1683 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
1684 dwc3_restart_usb_work(&mdwc->restart_usb_work);
1685 break;
1686 default:
1687 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1688 break;
1689 }
1690}
1691
1692static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
1693{
1694 int ret = 0;
1695
1696 if (core_reset) {
1697 ret = dwc3_msm_link_clk_reset(mdwc, 1);
1698 if (ret)
1699 return;
1700
1701 usleep_range(1000, 1200);
1702 ret = dwc3_msm_link_clk_reset(mdwc, 0);
1703 if (ret)
1704 return;
1705
1706 usleep_range(10000, 12000);
1707 }
1708
1709 if (mdwc->dbm) {
1710 /* Reset the DBM */
1711 dbm_soft_reset(mdwc->dbm, 1);
1712 usleep_range(1000, 1200);
1713 dbm_soft_reset(mdwc->dbm, 0);
1714
1715 /*enable DBM*/
1716 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1717 DBM_EN_MASK, 0x1);
1718 dbm_enable(mdwc->dbm);
1719 }
1720}
1721
1722static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
1723{
1724 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1725 u32 val;
1726
1727 /* Configure AHB2PHY for one wait state read/write */
1728 if (mdwc->ahb2phy_base) {
1729 clk_prepare_enable(mdwc->cfg_ahb_clk);
1730 val = readl_relaxed(mdwc->ahb2phy_base +
1731 PERIPH_SS_AHB2PHY_TOP_CFG);
1732 if (val != ONE_READ_WRITE_WAIT) {
1733 writel_relaxed(ONE_READ_WRITE_WAIT,
1734 mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
1735 /* complete above write before configuring USB PHY. */
1736 mb();
1737 }
1738 clk_disable_unprepare(mdwc->cfg_ahb_clk);
1739 }
1740
1741 if (!mdwc->init) {
1742 dbg_event(0xFF, "dwc3 init",
1743 atomic_read(&mdwc->dev->power.usage_count));
1744 dwc3_core_pre_init(dwc);
1745 mdwc->init = true;
1746 }
1747
1748 dwc3_core_init(dwc);
1749 /* Re-configure event buffers */
1750 dwc3_event_buffers_setup(dwc);
1751}
1752
1753static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
1754{
1755 unsigned long timeout;
1756 u32 reg = 0;
1757
1758 if ((mdwc->in_host_mode || mdwc->vbus_active)
1759 && dwc3_msm_is_superspeed(mdwc)) {
1760 if (!atomic_read(&mdwc->in_p3)) {
1761 dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
1762 return -EBUSY;
1763 }
1764 }
1765
1766 /* Clear previous L2 events */
1767 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
1768 PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
1769
1770 /* Prepare HSPHY for suspend */
1771 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
1772 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
1773 reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
1774
1775 /* Wait for PHY to go into L2 */
1776 timeout = jiffies + msecs_to_jiffies(5);
1777 while (!time_after(jiffies, timeout)) {
1778 reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
1779 if (reg & PWR_EVNT_LPM_IN_L2_MASK)
1780 break;
1781 }
1782 if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
1783 dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
1784
1785 /* Clear L2 event bit */
1786 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
1787 PWR_EVNT_LPM_IN_L2_MASK);
1788
1789 return 0;
1790}
1791
1792static void dwc3_msm_bus_vote_w(struct work_struct *w)
1793{
1794 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
1795 int ret;
1796
1797 ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
1798 mdwc->bus_vote);
1799 if (ret)
1800 dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
1801}
1802
1803static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
1804{
1805 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1806 int i, num_ports;
1807 u32 reg;
1808
1809 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
1810 if (mdwc->in_host_mode) {
1811 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
1812 num_ports = HCS_MAX_PORTS(reg);
1813 for (i = 0; i < num_ports; i++) {
1814 reg = dwc3_msm_read_reg(mdwc->base,
1815 USB3_PORTSC + i*0x10);
1816 if (reg & PORT_PE) {
1817 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
1818 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1819 else if (DEV_LOWSPEED(reg))
1820 mdwc->hs_phy->flags |= PHY_LS_MODE;
1821 }
1822 }
1823 } else {
1824 if (dwc->gadget.speed == USB_SPEED_HIGH ||
1825 dwc->gadget.speed == USB_SPEED_FULL)
1826 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
1827 else if (dwc->gadget.speed == USB_SPEED_LOW)
1828 mdwc->hs_phy->flags |= PHY_LS_MODE;
1829 }
1830}
1831
1832
1833static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
1834{
1835 int ret, i;
1836 bool can_suspend_ssphy;
1837 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1838
1839 dbg_event(0xFF, "Ctl Sus", atomic_read(&dwc->in_lpm));
1840
1841 if (atomic_read(&dwc->in_lpm)) {
1842 dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
1843 return 0;
1844 }
1845
1846 if (!mdwc->in_host_mode) {
1847 /* pending device events unprocessed */
1848 for (i = 0; i < dwc->num_event_buffers; i++) {
1849 struct dwc3_event_buffer *evt = dwc->ev_buffs[i];
1850
1851 if ((evt->flags & DWC3_EVENT_PENDING)) {
1852 dev_dbg(mdwc->dev,
1853 "%s: %d device events pending, abort suspend\n",
1854 __func__, evt->count / 4);
1855 dbg_print_reg("PENDING DEVICE EVENT",
1856 *(u32 *)(evt->buf + evt->lpos));
1857 return -EBUSY;
1858 }
1859 }
1860 }
1861
1862 if (!mdwc->vbus_active && dwc->is_drd &&
1863 mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
1864 /*
1865 * In some cases, the pm_runtime_suspend may be called by
1866 * usb_bam when there is pending lpm flag. However, if this is
1867 * done when cable was disconnected and otg state has not
1868 * yet changed to IDLE, then it means OTG state machine
1869 * is running and we race against it. So cancel LPM for now,
1870 * and OTG state machine will go for LPM later, after completing
1871 * transition to IDLE state.
1872 */
1873 dev_dbg(mdwc->dev,
1874 "%s: cable disconnected while not in idle otg state\n",
1875 __func__);
1876 return -EBUSY;
1877 }
1878
1879 /*
1880 * Check if device is not in CONFIGURED state
1881 * then check controller state of L2 and break
1882 * LPM sequence. Check this for device bus suspend case.
1883 */
1884 if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
1885 (dwc->gadget.state != USB_STATE_CONFIGURED)) {
1886 pr_err("%s(): Trying to go in LPM with state:%d\n",
1887 __func__, dwc->gadget.state);
1888 pr_err("%s(): LPM is not performed.\n", __func__);
1889 return -EBUSY;
1890 }
1891
1892 ret = dwc3_msm_prepare_suspend(mdwc);
1893 if (ret)
1894 return ret;
1895
1896 /* Initialize variables here */
1897 can_suspend_ssphy = !(mdwc->in_host_mode &&
1898 dwc3_msm_is_host_superspeed(mdwc));
1899
1900 /* Disable core irq */
1901 if (dwc->irq)
1902 disable_irq(dwc->irq);
1903
1904 /* disable power event irq, hs and ss phy irq is used as wake up src */
1905 disable_irq(mdwc->pwr_event_irq);
1906
1907 dwc3_set_phy_speed_flags(mdwc);
1908 /* Suspend HS PHY */
1909 usb_phy_set_suspend(mdwc->hs_phy, 1);
1910
1911 /* Suspend SS PHY */
1912 if (can_suspend_ssphy) {
1913 /* indicate phy about SS mode */
1914 if (dwc3_msm_is_superspeed(mdwc))
1915 mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
1916 usb_phy_set_suspend(mdwc->ss_phy, 1);
1917 mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
1918 }
1919
1920 /* make sure above writes are completed before turning off clocks */
1921 wmb();
1922
1923 /* Disable clocks */
1924 if (mdwc->bus_aggr_clk)
1925 clk_disable_unprepare(mdwc->bus_aggr_clk);
1926 clk_disable_unprepare(mdwc->utmi_clk);
1927
1928 clk_set_rate(mdwc->core_clk, 19200000);
1929 clk_disable_unprepare(mdwc->core_clk);
1930 /*
1931 * Disable iface_clk only after core_clk as core_clk has FSM
1932 * depedency on iface_clk. Hence iface_clk should be turned off
1933 * after core_clk is turned off.
1934 */
1935 clk_disable_unprepare(mdwc->iface_clk);
1936 /* USB PHY no more requires TCXO */
1937 clk_disable_unprepare(mdwc->xo_clk);
1938
1939 /* Perform controller power collapse */
1940 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
1941 mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
1942 dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
1943 dwc3_msm_config_gdsc(mdwc, 0);
1944 clk_disable_unprepare(mdwc->sleep_clk);
1945 }
1946
1947 /* Remove bus voting */
1948 if (mdwc->bus_perf_client) {
1949 mdwc->bus_vote = 0;
1950 schedule_work(&mdwc->bus_vote_w);
1951 }
1952
1953 /*
1954 * release wakeup source with timeout to defer system suspend to
1955 * handle case where on USB cable disconnect, SUSPEND and DISCONNECT
1956 * event is received.
1957 */
1958 if (mdwc->lpm_to_suspend_delay) {
1959 dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
1960 mdwc->lpm_to_suspend_delay);
1961 pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
1962 } else {
1963 pm_relax(mdwc->dev);
1964 }
1965
1966 atomic_set(&dwc->in_lpm, 1);
1967
1968 /*
1969 * with DCP or during cable disconnect, we dont require wakeup
1970 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
1971 * case of host bus suspend and device bus suspend.
1972 */
1973 if (mdwc->vbus_active || mdwc->in_host_mode) {
1974 enable_irq_wake(mdwc->hs_phy_irq);
1975 enable_irq(mdwc->hs_phy_irq);
1976 if (mdwc->ss_phy_irq) {
1977 enable_irq_wake(mdwc->ss_phy_irq);
1978 enable_irq(mdwc->ss_phy_irq);
1979 }
1980 /*
1981 * Enable power event irq during bus suspend in host mode for
1982 * mapping MPM pin for DP so that wakeup can happen in system
1983 * suspend.
1984 */
1985 if (mdwc->in_host_mode) {
1986 enable_irq(mdwc->pwr_event_irq);
1987 enable_irq_wake(mdwc->pwr_event_irq);
1988 }
1989 mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
1990 }
1991
1992 dev_info(mdwc->dev, "DWC3 in low power mode\n");
1993 return 0;
1994}
1995
1996static int dwc3_msm_resume(struct dwc3_msm *mdwc)
1997{
1998 int ret;
1999 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2000
2001 dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
2002
2003 if (!atomic_read(&dwc->in_lpm)) {
2004 dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
2005 return 0;
2006 }
2007
2008 pm_stay_awake(mdwc->dev);
2009
2010 /* Enable bus voting */
2011 if (mdwc->bus_perf_client) {
2012 mdwc->bus_vote = 1;
2013 schedule_work(&mdwc->bus_vote_w);
2014 }
2015
2016 /* Vote for TCXO while waking up USB HSPHY */
2017 ret = clk_prepare_enable(mdwc->xo_clk);
2018 if (ret)
2019 dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
2020 __func__, ret);
2021
2022 /* Restore controller power collapse */
2023 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2024 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2025 dwc3_msm_config_gdsc(mdwc, 1);
2026
2027 clk_reset(mdwc->core_clk, CLK_RESET_ASSERT);
2028 /* HW requires a short delay for reset to take place properly */
2029 usleep_range(1000, 1200);
2030 clk_reset(mdwc->core_clk, CLK_RESET_DEASSERT);
2031 clk_prepare_enable(mdwc->sleep_clk);
2032 }
2033
2034 /*
2035 * Enable clocks
2036 * Turned ON iface_clk before core_clk due to FSM depedency.
2037 */
2038 clk_prepare_enable(mdwc->iface_clk);
2039 clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2040 clk_prepare_enable(mdwc->core_clk);
2041 clk_prepare_enable(mdwc->utmi_clk);
2042 if (mdwc->bus_aggr_clk)
2043 clk_prepare_enable(mdwc->bus_aggr_clk);
2044
2045 /* Resume SS PHY */
2046 if (mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
2047 mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
2048 if (mdwc->typec_orientation == ORIENTATION_CC1)
2049 mdwc->ss_phy->flags |= PHY_LANE_A;
2050 if (mdwc->typec_orientation == ORIENTATION_CC2)
2051 mdwc->ss_phy->flags |= PHY_LANE_B;
2052 usb_phy_set_suspend(mdwc->ss_phy, 0);
2053 mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
2054 mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
2055 }
2056
2057 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2058 /* Resume HS PHY */
2059 usb_phy_set_suspend(mdwc->hs_phy, 0);
2060
2061 /* Recover from controller power collapse */
2062 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2063 u32 tmp;
2064
2065 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2066
2067 dwc3_msm_power_collapse_por(mdwc);
2068
2069 /* Get initial P3 status and enable IN_P3 event */
2070 tmp = dwc3_msm_read_reg_field(mdwc->base,
2071 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2072 atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
2073 dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
2074 PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
2075
2076 mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
2077 }
2078
2079 atomic_set(&dwc->in_lpm, 0);
2080
2081 /* Disable HSPHY auto suspend */
2082 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2083 dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
2084 ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
2085 DWC3_GUSB2PHYCFG_SUSPHY));
2086
2087 /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
2088 if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
2089 disable_irq_wake(mdwc->hs_phy_irq);
2090 disable_irq_nosync(mdwc->hs_phy_irq);
2091 if (mdwc->ss_phy_irq) {
2092 disable_irq_wake(mdwc->ss_phy_irq);
2093 disable_irq_nosync(mdwc->ss_phy_irq);
2094 }
2095 if (mdwc->in_host_mode) {
2096 disable_irq_wake(mdwc->pwr_event_irq);
2097 disable_irq(mdwc->pwr_event_irq);
2098 }
2099 mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2100 }
2101
2102 dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
2103
2104 /* enable power evt irq for IN P3 detection */
2105 enable_irq(mdwc->pwr_event_irq);
2106
2107 /* Enable core irq */
2108 if (dwc->irq)
2109 enable_irq(dwc->irq);
2110
2111 /*
2112 * Handle other power events that could not have been handled during
2113 * Low Power Mode
2114 */
2115 dwc3_pwr_event_handler(mdwc);
2116
2117 dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
2118
2119 return 0;
2120}
2121
2122/**
2123 * dwc3_ext_event_notify - callback to handle events from external transceiver
2124 *
2125 * Returns 0 on success
2126 */
2127static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2128{
2129 /* Flush processing any pending events before handling new ones */
2130 flush_delayed_work(&mdwc->sm_work);
2131
2132 if (mdwc->id_state == DWC3_ID_FLOAT) {
2133 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2134 set_bit(ID, &mdwc->inputs);
2135 } else {
2136 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2137 clear_bit(ID, &mdwc->inputs);
2138 }
2139
2140 if (mdwc->vbus_active && !mdwc->in_restart) {
2141 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2142 set_bit(B_SESS_VLD, &mdwc->inputs);
2143 } else {
2144 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2145 clear_bit(B_SESS_VLD, &mdwc->inputs);
2146 }
2147
2148 if (mdwc->suspend) {
2149 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2150 set_bit(B_SUSPEND, &mdwc->inputs);
2151 } else {
2152 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2153 clear_bit(B_SUSPEND, &mdwc->inputs);
2154 }
2155
2156 schedule_delayed_work(&mdwc->sm_work, 0);
2157}
2158
2159static void dwc3_resume_work(struct work_struct *w)
2160{
2161 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
2162 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2163
2164 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2165
2166 /*
2167 * exit LPM first to meet resume timeline from device side.
2168 * resume_pending flag would prevent calling
2169 * dwc3_msm_resume() in case we are here due to system
2170 * wide resume without usb cable connected. This flag is set
2171 * only in case of power event irq in lpm.
2172 */
2173 if (mdwc->resume_pending) {
2174 dwc3_msm_resume(mdwc);
2175 mdwc->resume_pending = false;
2176 }
2177
2178 if (atomic_read(&mdwc->pm_suspended)) {
2179 dbg_event(0xFF, "RWrk PMSus", 0);
2180 /* let pm resume kick in resume work later */
2181 return;
2182 }
2183
2184 dbg_event(0xFF, "RWrk", dwc->is_drd);
2185 dwc3_ext_event_notify(mdwc);
2186}
2187
2188static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2189{
2190 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2191 u32 irq_stat, irq_clear = 0;
2192
2193 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2194 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2195
2196 /* Check for P3 events */
2197 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2198 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2199 /* Can't tell if entered or exit P3, so check LINKSTATE */
2200 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2201 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2202 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2203 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2204
2205 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2206 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2207 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2208 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2209 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2210 atomic_set(&mdwc->in_p3, 0);
2211 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2212 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2213 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2214 atomic_set(&mdwc->in_p3, 1);
2215 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2216 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2217 }
2218
2219 /* Clear L2 exit */
2220 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2221 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2222 irq_stat |= PWR_EVNT_LPM_OUT_L2_MASK;
2223 }
2224
2225 /* Handle exit from L1 events */
2226 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2227 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2228 __func__);
2229 if (usb_gadget_wakeup(&dwc->gadget))
2230 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2231 __func__);
2232 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2233 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2234 }
2235
2236 /* Unhandled events */
2237 if (irq_stat)
2238 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2239 __func__, irq_stat);
2240
2241 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2242}
2243
2244static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2245{
2246 struct dwc3_msm *mdwc = _mdwc;
2247 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2248
2249 dev_dbg(mdwc->dev, "%s\n", __func__);
2250
2251 if (atomic_read(&dwc->in_lpm))
2252 dwc3_resume_work(&mdwc->resume_work);
2253 else
2254 dwc3_pwr_event_handler(mdwc);
2255
2256 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
2257
2258 return IRQ_HANDLED;
2259}
2260
2261static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2262{
2263 struct dwc3_msm *mdwc = data;
2264 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2265
2266 dwc->t_pwr_evt_irq = ktime_get();
2267 dev_dbg(mdwc->dev, "%s received\n", __func__);
2268 /*
2269 * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to acertain
2270 * which interrupts have been triggered, as the clocks are disabled.
2271 * Resume controller by waking up pwr event irq thread.After re-enabling
2272 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2273 * all other power events.
2274 */
2275 if (atomic_read(&dwc->in_lpm)) {
2276 /* set this to call dwc3_msm_resume() */
2277 mdwc->resume_pending = true;
2278 return IRQ_WAKE_THREAD;
2279 }
2280
2281 dwc3_pwr_event_handler(mdwc);
2282 return IRQ_HANDLED;
2283}
2284
2285static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2286 unsigned long action, void *hcpu)
2287{
2288 uint32_t cpu = (uintptr_t)hcpu;
2289 struct dwc3_msm *mdwc =
2290 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2291
2292 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2293 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2294 cpu_to_affin, mdwc->irq_to_affin);
2295 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2296 }
2297
2298 return NOTIFY_OK;
2299}
2300
2301static void dwc3_otg_sm_work(struct work_struct *w);
2302
2303static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
2304{
2305 int ret;
2306
2307 mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
2308 if (IS_ERR(mdwc->dwc3_gdsc))
2309 mdwc->dwc3_gdsc = NULL;
2310
2311 mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
2312 if (IS_ERR(mdwc->xo_clk)) {
2313 dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
2314 __func__);
2315 ret = PTR_ERR(mdwc->xo_clk);
2316 return ret;
2317 }
2318 clk_set_rate(mdwc->xo_clk, 19200000);
2319
2320 mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
2321 if (IS_ERR(mdwc->iface_clk)) {
2322 dev_err(mdwc->dev, "failed to get iface_clk\n");
2323 ret = PTR_ERR(mdwc->iface_clk);
2324 return ret;
2325 }
2326
2327 /*
2328 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
2329 * run at 125Mhz in SSUSB mode and >60MHZ for HSUSB mode.
2330 * On newer platform it can run at 150MHz as well.
2331 */
2332 mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
2333 if (IS_ERR(mdwc->core_clk)) {
2334 dev_err(mdwc->dev, "failed to get core_clk\n");
2335 ret = PTR_ERR(mdwc->core_clk);
2336 return ret;
2337 }
2338
2339 /*
2340 * Get Max supported clk frequency for USB Core CLK and request
2341 * to set the same.
2342 */
2343 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX);
2344 if (IS_ERR_VALUE(mdwc->core_clk_rate)) {
2345 dev_err(mdwc->dev, "fail to get core clk max freq.\n");
2346 } else {
2347 ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2348 if (ret)
2349 dev_err(mdwc->dev, "fail to set core_clk freq:%d\n",
2350 ret);
2351 }
2352
2353 mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
2354 if (IS_ERR(mdwc->sleep_clk)) {
2355 dev_err(mdwc->dev, "failed to get sleep_clk\n");
2356 ret = PTR_ERR(mdwc->sleep_clk);
2357 return ret;
2358 }
2359
2360 clk_set_rate(mdwc->sleep_clk, 32000);
2361 mdwc->utmi_clk_rate = 19200000;
2362 mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
2363 if (IS_ERR(mdwc->utmi_clk)) {
2364 dev_err(mdwc->dev, "failed to get utmi_clk\n");
2365 ret = PTR_ERR(mdwc->utmi_clk);
2366 return ret;
2367 }
2368
2369 clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
2370 mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
2371 if (IS_ERR(mdwc->bus_aggr_clk))
2372 mdwc->bus_aggr_clk = NULL;
2373
2374 if (of_property_match_string(mdwc->dev->of_node,
2375 "clock-names", "cfg_ahb_clk") >= 0) {
2376 mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
2377 if (IS_ERR(mdwc->cfg_ahb_clk)) {
2378 ret = PTR_ERR(mdwc->cfg_ahb_clk);
2379 mdwc->cfg_ahb_clk = NULL;
2380 if (ret != -EPROBE_DEFER)
2381 dev_err(mdwc->dev,
2382 "failed to get cfg_ahb_clk ret %d\n",
2383 ret);
2384 return ret;
2385 }
2386 }
2387
2388 return 0;
2389}
2390
2391static int dwc3_msm_id_notifier(struct notifier_block *nb,
2392 unsigned long event, void *ptr)
2393{
2394 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
2395 struct extcon_dev *edev = ptr;
2396 enum dwc3_id_state id;
2397 int cc_state;
2398
2399 if (!edev) {
2400 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2401 goto done;
2402 }
2403
2404 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2405
2406 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2407
2408 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2409 if (cc_state < 0)
2410 mdwc->typec_orientation = ORIENTATION_NONE;
2411 else
2412 mdwc->typec_orientation =
2413 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2414
2415 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
2416
2417 if (mdwc->id_state != id) {
2418 mdwc->id_state = id;
2419 dbg_event(0xFF, "id_state", mdwc->id_state);
2420 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2421 }
2422
2423done:
2424 return NOTIFY_DONE;
2425}
2426
2427static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2428 unsigned long event, void *ptr)
2429{
2430 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2431 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2432 struct extcon_dev *edev = ptr;
2433 int cc_state;
2434
2435 if (!edev) {
2436 dev_err(mdwc->dev, "%s: edev null\n", __func__);
2437 goto done;
2438 }
2439
2440 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2441
2442 if (mdwc->vbus_active == event)
2443 return NOTIFY_DONE;
2444
2445 cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
2446 if (cc_state < 0)
2447 mdwc->typec_orientation = ORIENTATION_NONE;
2448 else
2449 mdwc->typec_orientation =
2450 cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
2451
2452 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
2453
2454 mdwc->vbus_active = event;
2455 if (dwc->is_drd && !mdwc->in_restart) {
2456 dbg_event(0xFF, "Q RW (vbus)", mdwc->vbus_active);
2457 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2458 }
2459done:
2460 return NOTIFY_DONE;
2461}
2462
2463static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
2464{
2465 struct device_node *node = mdwc->dev->of_node;
2466 struct extcon_dev *edev;
2467 int ret = 0;
2468
2469 if (!of_property_read_bool(node, "extcon"))
2470 return 0;
2471
2472 edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
2473 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
2474 return PTR_ERR(edev);
2475
2476 if (!IS_ERR(edev)) {
2477 mdwc->extcon_vbus = edev;
2478 mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
2479 ret = extcon_register_notifier(edev, EXTCON_USB,
2480 &mdwc->vbus_nb);
2481 if (ret < 0) {
2482 dev_err(mdwc->dev, "failed to register notifier for USB\n");
2483 return ret;
2484 }
2485 }
2486
2487 /* if a second phandle was provided, use it to get a separate edev */
2488 if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
2489 edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
2490 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2491 ret = PTR_ERR(edev);
2492 goto err;
2493 }
2494 }
2495
2496 if (!IS_ERR(edev)) {
2497 mdwc->extcon_id = edev;
2498 mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
2499 ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
2500 &mdwc->id_nb);
2501 if (ret < 0) {
2502 dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
2503 goto err;
2504 }
2505 }
2506
2507 return 0;
2508err:
2509 if (mdwc->extcon_vbus)
2510 extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
2511 &mdwc->vbus_nb);
2512 return ret;
2513}
2514
2515static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
2516 char *buf)
2517{
2518 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2519
2520 if (mdwc->vbus_active)
2521 return snprintf(buf, PAGE_SIZE, "peripheral\n");
2522 if (mdwc->id_state == DWC3_ID_GROUND)
2523 return snprintf(buf, PAGE_SIZE, "host\n");
2524
2525 return snprintf(buf, PAGE_SIZE, "none\n");
2526}
2527
2528static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
2529 const char *buf, size_t count)
2530{
2531 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
2532
2533 if (sysfs_streq(buf, "peripheral")) {
2534 mdwc->vbus_active = true;
2535 mdwc->id_state = DWC3_ID_FLOAT;
2536 } else if (sysfs_streq(buf, "host")) {
2537 mdwc->vbus_active = false;
2538 mdwc->id_state = DWC3_ID_GROUND;
2539 } else {
2540 mdwc->vbus_active = false;
2541 mdwc->id_state = DWC3_ID_FLOAT;
2542 }
2543
2544 dwc3_ext_event_notify(mdwc);
2545
2546 return count;
2547}
2548
2549static DEVICE_ATTR_RW(mode);
2550
2551static int dwc3_msm_probe(struct platform_device *pdev)
2552{
2553 struct device_node *node = pdev->dev.of_node, *dwc3_node;
2554 struct device *dev = &pdev->dev;
2555 struct dwc3_msm *mdwc;
2556 struct dwc3 *dwc;
2557 struct resource *res;
2558 void __iomem *tcsr;
2559 bool host_mode;
2560 int ret = 0;
2561 int ext_hub_reset_gpio;
2562 u32 val;
2563
2564 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
2565 if (!mdwc)
2566 return -ENOMEM;
2567
2568 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
2569 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
2570 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
2571 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
2572 return -EOPNOTSUPP;
2573 }
2574 }
2575
2576 platform_set_drvdata(pdev, mdwc);
2577 mdwc->dev = &pdev->dev;
2578
2579 INIT_LIST_HEAD(&mdwc->req_complete_list);
2580 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
2581 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
2582 INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
2583 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
2584
2585 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
2586 if (!mdwc->dwc3_wq) {
2587 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
2588 return -ENOMEM;
2589 }
2590
2591 /* Get all clks and gdsc reference */
2592 ret = dwc3_msm_get_clk_gdsc(mdwc);
2593 if (ret) {
2594 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
2595 return ret;
2596 }
2597
2598 mdwc->id_state = DWC3_ID_FLOAT;
2599 set_bit(ID, &mdwc->inputs);
2600
2601 mdwc->charging_disabled = of_property_read_bool(node,
2602 "qcom,charging-disabled");
2603
2604 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
2605 &mdwc->lpm_to_suspend_delay);
2606 if (ret) {
2607 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
2608 mdwc->lpm_to_suspend_delay = 0;
2609 }
2610
2611 /*
2612 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
2613 * DP and DM linestate transitions during low power mode.
2614 */
2615 mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
2616 if (mdwc->hs_phy_irq < 0) {
2617 dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
2618 ret = -EINVAL;
2619 goto err;
2620 } else {
2621 irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
2622 ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
2623 msm_dwc3_pwr_irq,
2624 msm_dwc3_pwr_irq_thread,
2625 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2626 | IRQF_ONESHOT, "hs_phy_irq", mdwc);
2627 if (ret) {
2628 dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
2629 ret);
2630 goto err;
2631 }
2632 }
2633
2634 mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
2635 if (mdwc->ss_phy_irq < 0) {
2636 dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
2637 } else {
2638 irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
2639 ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
2640 msm_dwc3_pwr_irq,
2641 msm_dwc3_pwr_irq_thread,
2642 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
2643 | IRQF_ONESHOT, "ss_phy_irq", mdwc);
2644 if (ret) {
2645 dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
2646 ret);
2647 goto err;
2648 }
2649 }
2650
2651 mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
2652 if (mdwc->pwr_event_irq < 0) {
2653 dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
2654 ret = -EINVAL;
2655 goto err;
2656 } else {
2657 /* will be enabled in dwc3_msm_resume() */
2658 irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
2659 ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
2660 msm_dwc3_pwr_irq,
2661 msm_dwc3_pwr_irq_thread,
2662 IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
2663 "msm_dwc3", mdwc);
2664 if (ret) {
2665 dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
2666 ret);
2667 goto err;
2668 }
2669 }
2670
2671 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
2672 if (!res) {
2673 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
2674 } else {
2675 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
2676 resource_size(res));
2677 if (IS_ERR_OR_NULL(tcsr)) {
2678 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
2679 } else {
2680 /* Enable USB3 on the primary USB port. */
2681 writel_relaxed(0x1, tcsr);
2682 /*
2683 * Ensure that TCSR write is completed before
2684 * USB registers initialization.
2685 */
2686 mb();
2687 }
2688 }
2689
2690 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
2691 if (!res) {
2692 dev_err(&pdev->dev, "missing memory base resource\n");
2693 ret = -ENODEV;
2694 goto err;
2695 }
2696
2697 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
2698 resource_size(res));
2699 if (!mdwc->base) {
2700 dev_err(&pdev->dev, "ioremap failed\n");
2701 ret = -ENODEV;
2702 goto err;
2703 }
2704
2705 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2706 "ahb2phy_base");
2707 if (res) {
2708 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
2709 res->start, resource_size(res));
2710 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
2711 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
2712 mdwc->ahb2phy_base = NULL;
2713 } else {
2714 /*
2715 * On some targets cfg_ahb_clk depends upon usb gdsc
2716 * regulator. If cfg_ahb_clk is enabled without
2717 * turning on usb gdsc regulator clk is stuck off.
2718 */
2719 dwc3_msm_config_gdsc(mdwc, 1);
2720 clk_prepare_enable(mdwc->cfg_ahb_clk);
2721 /* Configure AHB2PHY for one wait state read/write*/
2722 val = readl_relaxed(mdwc->ahb2phy_base +
2723 PERIPH_SS_AHB2PHY_TOP_CFG);
2724 if (val != ONE_READ_WRITE_WAIT) {
2725 writel_relaxed(ONE_READ_WRITE_WAIT,
2726 mdwc->ahb2phy_base +
2727 PERIPH_SS_AHB2PHY_TOP_CFG);
2728 /* complete above write before using USB PHY */
2729 mb();
2730 }
2731 clk_disable_unprepare(mdwc->cfg_ahb_clk);
2732 dwc3_msm_config_gdsc(mdwc, 0);
2733 }
2734 }
2735
2736 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
2737 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
2738 if (IS_ERR(mdwc->dbm)) {
2739 dev_err(&pdev->dev, "unable to get dbm device\n");
2740 ret = -EPROBE_DEFER;
2741 goto err;
2742 }
2743 /*
2744 * Add power event if the dbm indicates coming out of L1
2745 * by interrupt
2746 */
2747 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
2748 if (!mdwc->pwr_event_irq) {
2749 dev_err(&pdev->dev,
2750 "need pwr_event_irq exiting L1\n");
2751 ret = -EINVAL;
2752 goto err;
2753 }
2754 }
2755 }
2756
2757 ext_hub_reset_gpio = of_get_named_gpio(node,
2758 "qcom,ext-hub-reset-gpio", 0);
2759
2760 if (gpio_is_valid(ext_hub_reset_gpio)
2761 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
2762 "qcom,ext-hub-reset-gpio"))) {
2763 /* reset external hub */
2764 gpio_direction_output(ext_hub_reset_gpio, 1);
2765 /*
2766 * Hub reset should be asserted for minimum 5microsec
2767 * before deasserting.
2768 */
2769 usleep_range(5, 1000);
2770 gpio_direction_output(ext_hub_reset_gpio, 0);
2771 }
2772
2773 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
2774 &mdwc->tx_fifo_size))
2775 dev_err(&pdev->dev,
2776 "unable to read platform data tx fifo size\n");
2777
2778 mdwc->disable_host_mode_pm = of_property_read_bool(node,
2779 "qcom,disable-host-mode-pm");
2780
2781 dwc3_set_notifier(&dwc3_msm_notify_event);
2782
2783 /* Assumes dwc3 is the first DT child of dwc3-msm */
2784 dwc3_node = of_get_next_available_child(node, NULL);
2785 if (!dwc3_node) {
2786 dev_err(&pdev->dev, "failed to find dwc3 child\n");
2787 ret = -ENODEV;
2788 goto err;
2789 }
2790
2791 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2792 if (ret) {
2793 dev_err(&pdev->dev,
2794 "failed to add create dwc3 core\n");
2795 of_node_put(dwc3_node);
2796 goto err;
2797 }
2798
2799 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
2800 of_node_put(dwc3_node);
2801 if (!mdwc->dwc3) {
2802 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
2803 goto put_dwc3;
2804 }
2805
2806 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
2807 "usb-phy", 0);
2808 if (IS_ERR(mdwc->hs_phy)) {
2809 dev_err(&pdev->dev, "unable to get hsphy device\n");
2810 ret = PTR_ERR(mdwc->hs_phy);
2811 goto put_dwc3;
2812 }
2813 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
2814 "usb-phy", 1);
2815 if (IS_ERR(mdwc->ss_phy)) {
2816 dev_err(&pdev->dev, "unable to get ssphy device\n");
2817 ret = PTR_ERR(mdwc->ss_phy);
2818 goto put_dwc3;
2819 }
2820
2821 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
2822 if (mdwc->bus_scale_table) {
2823 mdwc->bus_perf_client =
2824 msm_bus_scale_register_client(mdwc->bus_scale_table);
2825 }
2826
2827 dwc = platform_get_drvdata(mdwc->dwc3);
2828 if (!dwc) {
2829 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
2830 goto put_dwc3;
2831 }
2832
2833 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
2834 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
2835
2836 if (cpu_to_affin)
2837 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
2838
2839 /*
2840 * Clocks and regulators will not be turned on until the first time
2841 * runtime PM resume is called. This is to allow for booting up with
2842 * charger already connected so as not to disturb PHY line states.
2843 */
2844 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
2845 atomic_set(&dwc->in_lpm, 1);
2846 pm_runtime_set_suspended(mdwc->dev);
2847 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
2848 pm_runtime_use_autosuspend(mdwc->dev);
2849 pm_runtime_enable(mdwc->dev);
2850 device_init_wakeup(mdwc->dev, 1);
2851
2852 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
2853 pm_runtime_get_noresume(mdwc->dev);
2854
2855 ret = dwc3_msm_extcon_register(mdwc);
2856 if (ret)
2857 goto put_dwc3;
2858
2859 /* Update initial VBUS/ID state from extcon */
2860 if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
2861 EXTCON_USB))
2862 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
2863 if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
2864 EXTCON_USB_HOST))
2865 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
2866
2867 device_create_file(&pdev->dev, &dev_attr_mode);
2868
2869 schedule_delayed_work(&mdwc->sm_work, 0);
2870
2871 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
2872 if (!dwc->is_drd && host_mode) {
2873 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
2874 mdwc->id_state = DWC3_ID_GROUND;
2875 dwc3_ext_event_notify(mdwc);
2876 }
2877
2878 return 0;
2879
2880put_dwc3:
2881 platform_device_put(mdwc->dwc3);
2882 if (mdwc->bus_perf_client)
2883 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
2884err:
2885 return ret;
2886}
2887
2888static int dwc3_msm_remove_children(struct device *dev, void *data)
2889{
2890 device_unregister(dev);
2891 return 0;
2892}
2893
2894static int dwc3_msm_remove(struct platform_device *pdev)
2895{
2896 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
2897 int ret_pm;
2898
2899 device_remove_file(&pdev->dev, &dev_attr_mode);
2900
2901 if (cpu_to_affin)
2902 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
2903
2904 /*
2905 * In case of system suspend, pm_runtime_get_sync fails.
2906 * Hence turn ON the clocks manually.
2907 */
2908 ret_pm = pm_runtime_get_sync(mdwc->dev);
2909 dbg_event(0xFF, "Remov gsyn", ret_pm);
2910 if (ret_pm < 0) {
2911 dev_err(mdwc->dev,
2912 "pm_runtime_get_sync failed with %d\n", ret_pm);
2913 clk_prepare_enable(mdwc->utmi_clk);
2914 clk_prepare_enable(mdwc->core_clk);
2915 clk_prepare_enable(mdwc->iface_clk);
2916 clk_prepare_enable(mdwc->sleep_clk);
2917 if (mdwc->bus_aggr_clk)
2918 clk_prepare_enable(mdwc->bus_aggr_clk);
2919 clk_prepare_enable(mdwc->xo_clk);
2920 }
2921
2922 cancel_delayed_work_sync(&mdwc->sm_work);
2923
2924 if (mdwc->hs_phy)
2925 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
2926 platform_device_put(mdwc->dwc3);
2927 device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
2928
2929 dbg_event(0xFF, "Remov put", 0);
2930 pm_runtime_disable(mdwc->dev);
2931 pm_runtime_barrier(mdwc->dev);
2932 pm_runtime_put_sync(mdwc->dev);
2933 pm_runtime_set_suspended(mdwc->dev);
2934 device_wakeup_disable(mdwc->dev);
2935
2936 if (mdwc->bus_perf_client)
2937 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
2938
2939 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
2940 regulator_disable(mdwc->vbus_reg);
2941
2942 disable_irq(mdwc->hs_phy_irq);
2943 if (mdwc->ss_phy_irq)
2944 disable_irq(mdwc->ss_phy_irq);
2945 disable_irq(mdwc->pwr_event_irq);
2946
2947 clk_disable_unprepare(mdwc->utmi_clk);
2948 clk_set_rate(mdwc->core_clk, 19200000);
2949 clk_disable_unprepare(mdwc->core_clk);
2950 clk_disable_unprepare(mdwc->iface_clk);
2951 clk_disable_unprepare(mdwc->sleep_clk);
2952 clk_disable_unprepare(mdwc->xo_clk);
2953 clk_put(mdwc->xo_clk);
2954
2955 dwc3_msm_config_gdsc(mdwc, 0);
2956
2957 return 0;
2958}
2959
2960#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
2961
2962/**
2963 * dwc3_otg_start_host - helper function for starting/stoping the host
2964 * controller driver.
2965 *
2966 * @mdwc: Pointer to the dwc3_msm structure.
2967 * @on: start / stop the host controller driver.
2968 *
2969 * Returns 0 on success otherwise negative errno.
2970 */
2971static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
2972{
2973 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2974 int ret = 0;
2975
2976 if (!dwc->xhci)
2977 return -EINVAL;
2978
2979 /*
2980 * The vbus_reg pointer could have multiple values
2981 * NULL: regulator_get() hasn't been called, or was previously deferred
2982 * IS_ERR: regulator could not be obtained, so skip using it
2983 * Valid pointer otherwise
2984 */
2985 if (!mdwc->vbus_reg) {
2986 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
2987 "vbus_dwc3");
2988 if (IS_ERR(mdwc->vbus_reg) &&
2989 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
2990 /* regulators may not be ready, so retry again later */
2991 mdwc->vbus_reg = NULL;
2992 return -EPROBE_DEFER;
2993 }
2994 }
2995
2996 if (on) {
2997 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
2998
2999 pm_runtime_get_sync(mdwc->dev);
3000 dbg_event(0xFF, "StrtHost gync",
3001 atomic_read(&mdwc->dev->power.usage_count));
3002 mdwc->hs_phy->flags |= PHY_HOST_MODE;
3003 mdwc->ss_phy->flags |= PHY_HOST_MODE;
3004 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3005 if (!IS_ERR(mdwc->vbus_reg))
3006 ret = regulator_enable(mdwc->vbus_reg);
3007 if (ret) {
3008 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
3009 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3010 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3011 pm_runtime_put_sync(mdwc->dev);
3012 dbg_event(0xFF, "vregerr psync",
3013 atomic_read(&mdwc->dev->power.usage_count));
3014 return ret;
3015 }
3016
3017 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
3018
3019 /*
3020 * FIXME If micro A cable is disconnected during system suspend,
3021 * xhci platform device will be removed before runtime pm is
3022 * enabled for xhci device. Due to this, disable_depth becomes
3023 * greater than one and runtimepm is not enabled for next microA
3024 * connect. Fix this by calling pm_runtime_init for xhci device.
3025 */
3026 pm_runtime_init(&dwc->xhci->dev);
3027 ret = platform_device_add(dwc->xhci);
3028 if (ret) {
3029 dev_err(mdwc->dev,
3030 "%s: failed to add XHCI pdev ret=%d\n",
3031 __func__, ret);
3032 if (!IS_ERR(mdwc->vbus_reg))
3033 regulator_disable(mdwc->vbus_reg);
3034 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3035 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3036 pm_runtime_put_sync(mdwc->dev);
3037 dbg_event(0xFF, "pdeverr psync",
3038 atomic_read(&mdwc->dev->power.usage_count));
3039 return ret;
3040 }
3041
3042 /*
3043 * In some cases it is observed that USB PHY is not going into
3044 * suspend with host mode suspend functionality. Hence disable
3045 * XHCI's runtime PM here if disable_host_mode_pm is set.
3046 */
3047 if (mdwc->disable_host_mode_pm)
3048 pm_runtime_disable(&dwc->xhci->dev);
3049
3050 mdwc->in_host_mode = true;
3051 dwc3_usb3_phy_suspend(dwc, true);
3052
3053 /* xHCI should have incremented child count as necessary */
3054 dbg_event(0xFF, "StrtHost psync",
3055 atomic_read(&mdwc->dev->power.usage_count));
3056 pm_runtime_mark_last_busy(mdwc->dev);
3057 pm_runtime_put_sync_autosuspend(mdwc->dev);
3058 } else {
3059 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3060
3061 if (!IS_ERR(mdwc->vbus_reg))
3062 ret = regulator_disable(mdwc->vbus_reg);
3063 if (ret) {
3064 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3065 return ret;
3066 }
3067
3068 pm_runtime_get_sync(mdwc->dev);
3069 dbg_event(0xFF, "StopHost gsync",
3070 atomic_read(&mdwc->dev->power.usage_count));
3071 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3072 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3073 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3074 platform_device_del(dwc->xhci);
3075
3076 /*
3077 * Perform USB hardware RESET (both core reset and DBM reset)
3078 * when moving from host to peripheral. This is required for
3079 * peripheral mode to work.
3080 */
3081 dwc3_msm_block_reset(mdwc, true);
3082
3083 dwc3_usb3_phy_suspend(dwc, false);
3084 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3085
3086 mdwc->in_host_mode = false;
3087
3088 /* re-init core and OTG registers as block reset clears these */
3089 dwc3_post_host_reset_core_init(dwc);
3090 pm_runtime_mark_last_busy(mdwc->dev);
3091 pm_runtime_put_sync_autosuspend(mdwc->dev);
3092 dbg_event(0xFF, "StopHost psync",
3093 atomic_read(&mdwc->dev->power.usage_count));
3094 }
3095
3096 return 0;
3097}
3098
3099static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3100{
3101 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3102
3103 /* Update OTG VBUS Valid from HSPHY to controller */
3104 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3105 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3106 UTMI_OTG_VBUS_VALID,
3107 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3108
3109 /* Update only if Super Speed is supported */
3110 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3111 /* Update VBUS Valid from SSPHY to controller */
3112 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3113 LANE0_PWR_PRESENT,
3114 vbus_present ? LANE0_PWR_PRESENT : 0);
3115 }
3116}
3117
3118/**
3119 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3120 *
3121 * @mdwc: Pointer to the dwc3_msm structure.
3122 * @on: Turn ON/OFF the gadget.
3123 *
3124 * Returns 0 on success otherwise negative errno.
3125 */
3126static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3127{
3128 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3129
3130 pm_runtime_get_sync(mdwc->dev);
3131 dbg_event(0xFF, "StrtGdgt gsync",
3132 atomic_read(&mdwc->dev->power.usage_count));
3133
3134 if (on) {
3135 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3136 __func__, dwc->gadget.name);
3137
3138 dwc3_override_vbus_status(mdwc, true);
3139 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3140 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3141
3142 /*
3143 * Core reset is not required during start peripheral. Only
3144 * DBM reset is required, hence perform only DBM reset here.
3145 */
3146 dwc3_msm_block_reset(mdwc, false);
3147
3148 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3149 usb_gadget_vbus_connect(&dwc->gadget);
3150 } else {
3151 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3152 __func__, dwc->gadget.name);
3153 usb_gadget_vbus_disconnect(&dwc->gadget);
3154 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3155 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3156 dwc3_override_vbus_status(mdwc, false);
3157 dwc3_usb3_phy_suspend(dwc, false);
3158 }
3159
3160 pm_runtime_put_sync(mdwc->dev);
3161 dbg_event(0xFF, "StopGdgt psync",
3162 atomic_read(&mdwc->dev->power.usage_count));
3163
3164 return 0;
3165}
3166
3167static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3168{
3169 union power_supply_propval pval = {0,};
3170
3171 if (mdwc->charging_disabled)
3172 return 0;
3173
3174 if (mdwc->max_power == mA)
3175 return 0;
3176
3177 if (!mdwc->usb_psy) {
3178 mdwc->usb_psy = power_supply_get_by_name("usb");
3179 if (!mdwc->usb_psy) {
3180 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3181 return -ENODEV;
3182 }
3183 }
3184
3185 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3186
3187 if (mdwc->max_power <= 2 && mA > 2) {
3188 /* Enable Charging */
3189 pval.intval = true;
3190 if (power_supply_set_property(mdwc->usb_psy,
3191 POWER_SUPPLY_PROP_ONLINE, &pval))
3192 goto psy_error;
3193 pval.intval = 1000 * mA;
3194 if (power_supply_set_property(mdwc->usb_psy,
3195 POWER_SUPPLY_PROP_CURRENT_MAX, &pval))
3196 goto psy_error;
3197 } else if (mdwc->max_power > 0 && (mA == 0 || mA == 2)) {
3198 /* Disable charging */
3199 pval.intval = false;
3200 if (power_supply_set_property(mdwc->usb_psy,
3201 POWER_SUPPLY_PROP_ONLINE, &pval))
3202 goto psy_error;
3203 } else {
3204 /* Enable charging */
3205 pval.intval = true;
3206 if (power_supply_set_property(mdwc->usb_psy,
3207 POWER_SUPPLY_PROP_ONLINE, &pval))
3208 goto psy_error;
3209 }
3210
3211 /* Set max current limit in uA */
3212 pval.intval = 1000 * mA;
3213 if (power_supply_set_property(mdwc->usb_psy,
3214 POWER_SUPPLY_PROP_CURRENT_MAX, &pval))
3215 goto psy_error;
3216
3217 mdwc->max_power = mA;
3218 return 0;
3219
3220psy_error:
3221 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3222 return -ENXIO;
3223}
3224
3225
3226/**
3227 * dwc3_otg_sm_work - workqueue function.
3228 *
3229 * @w: Pointer to the dwc3 otg workqueue
3230 *
3231 * NOTE: After any change in otg_state, we must reschdule the state machine.
3232 */
3233static void dwc3_otg_sm_work(struct work_struct *w)
3234{
3235 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
3236 struct dwc3 *dwc = NULL;
3237 bool work = 0;
3238 int ret = 0;
3239 unsigned long delay = 0;
3240 const char *state;
3241
3242 if (mdwc->dwc3)
3243 dwc = platform_get_drvdata(mdwc->dwc3);
3244
3245 if (!dwc) {
3246 dev_err(mdwc->dev, "dwc is NULL.\n");
3247 return;
3248 }
3249
3250 state = usb_otg_state_string(mdwc->otg_state);
3251 dev_dbg(mdwc->dev, "%s state\n", state);
3252 dbg_event(0xFF, state, 0);
3253
3254 /* Check OTG state */
3255 switch (mdwc->otg_state) {
3256 case OTG_STATE_UNDEFINED:
3257 /* Do nothing if no cable connected */
3258 if (test_bit(ID, &mdwc->inputs) &&
3259 !test_bit(B_SESS_VLD, &mdwc->inputs))
3260 break;
3261
3262 dbg_event(0xFF, "Exit UNDEF", 0);
3263 mdwc->otg_state = OTG_STATE_B_IDLE;
3264 /* fall-through */
3265 case OTG_STATE_B_IDLE:
3266 if (!test_bit(ID, &mdwc->inputs)) {
3267 dev_dbg(mdwc->dev, "!id\n");
3268 mdwc->otg_state = OTG_STATE_A_IDLE;
3269 work = 1;
3270 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
3271 dev_dbg(mdwc->dev, "b_sess_vld\n");
3272 /*
3273 * Increment pm usage count upon cable connect. Count
3274 * is decremented in OTG_STATE_B_PERIPHERAL state on
3275 * cable disconnect or in bus suspend.
3276 */
3277 pm_runtime_get_sync(mdwc->dev);
3278 dbg_event(0xFF, "BIDLE gsync",
3279 atomic_read(&mdwc->dev->power.usage_count));
3280 dwc3_otg_start_peripheral(mdwc, 1);
3281 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3282 work = 1;
3283 } else {
3284 dwc3_msm_gadget_vbus_draw(mdwc, 0);
3285 dev_dbg(mdwc->dev, "Cable disconnected\n");
3286 }
3287 break;
3288
3289 case OTG_STATE_B_PERIPHERAL:
3290 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
3291 !test_bit(ID, &mdwc->inputs)) {
3292 dev_dbg(mdwc->dev, "!id || !bsv\n");
3293 mdwc->otg_state = OTG_STATE_B_IDLE;
3294 dwc3_otg_start_peripheral(mdwc, 0);
3295 /*
3296 * Decrement pm usage count upon cable disconnect
3297 * which was incremented upon cable connect in
3298 * OTG_STATE_B_IDLE state
3299 */
3300 pm_runtime_put_sync(mdwc->dev);
3301 dbg_event(0xFF, "!BSV psync",
3302 atomic_read(&mdwc->dev->power.usage_count));
3303 work = 1;
3304 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
3305 test_bit(B_SESS_VLD, &mdwc->inputs)) {
3306 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
3307 mdwc->otg_state = OTG_STATE_B_SUSPEND;
3308 /*
3309 * Decrement pm usage count upon bus suspend.
3310 * Count was incremented either upon cable
3311 * connect in OTG_STATE_B_IDLE or host
3312 * initiated resume after bus suspend in
3313 * OTG_STATE_B_SUSPEND state
3314 */
3315 pm_runtime_mark_last_busy(mdwc->dev);
3316 pm_runtime_put_autosuspend(mdwc->dev);
3317 dbg_event(0xFF, "SUSP put",
3318 atomic_read(&mdwc->dev->power.usage_count));
3319 }
3320 break;
3321
3322 case OTG_STATE_B_SUSPEND:
3323 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
3324 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
3325 mdwc->otg_state = OTG_STATE_B_IDLE;
3326 dwc3_otg_start_peripheral(mdwc, 0);
3327 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
3328 dev_dbg(mdwc->dev, "BSUSP !susp\n");
3329 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
3330 /*
3331 * Increment pm usage count upon host
3332 * initiated resume. Count was decremented
3333 * upon bus suspend in
3334 * OTG_STATE_B_PERIPHERAL state.
3335 */
3336 pm_runtime_get_sync(mdwc->dev);
3337 dbg_event(0xFF, "!SUSP gsync",
3338 atomic_read(&mdwc->dev->power.usage_count));
3339 }
3340 break;
3341
3342 case OTG_STATE_A_IDLE:
3343 /* Switch to A-Device*/
3344 if (test_bit(ID, &mdwc->inputs)) {
3345 dev_dbg(mdwc->dev, "id\n");
3346 mdwc->otg_state = OTG_STATE_B_IDLE;
3347 mdwc->vbus_retry_count = 0;
3348 work = 1;
3349 } else {
3350 mdwc->otg_state = OTG_STATE_A_HOST;
3351 ret = dwc3_otg_start_host(mdwc, 1);
3352 if ((ret == -EPROBE_DEFER) &&
3353 mdwc->vbus_retry_count < 3) {
3354 /*
3355 * Get regulator failed as regulator driver is
3356 * not up yet. Will try to start host after 1sec
3357 */
3358 mdwc->otg_state = OTG_STATE_A_IDLE;
3359 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
3360 delay = VBUS_REG_CHECK_DELAY;
3361 work = 1;
3362 mdwc->vbus_retry_count++;
3363 } else if (ret) {
3364 dev_err(mdwc->dev, "unable to start host\n");
3365 mdwc->otg_state = OTG_STATE_A_IDLE;
3366 goto ret;
3367 }
3368 }
3369 break;
3370
3371 case OTG_STATE_A_HOST:
3372 if (test_bit(ID, &mdwc->inputs)) {
3373 dev_dbg(mdwc->dev, "id\n");
3374 dwc3_otg_start_host(mdwc, 0);
3375 mdwc->otg_state = OTG_STATE_B_IDLE;
3376 mdwc->vbus_retry_count = 0;
3377 work = 1;
3378 } else {
3379 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
3380 dbg_event(0xFF, "XHCIResume", 0);
3381 if (dwc)
3382 pm_runtime_resume(&dwc->xhci->dev);
3383 }
3384 break;
3385
3386 default:
3387 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
3388
3389 }
3390
3391 if (work)
3392 schedule_delayed_work(&mdwc->sm_work, delay);
3393
3394ret:
3395 return;
3396}
3397
3398#ifdef CONFIG_PM_SLEEP
3399static int dwc3_msm_pm_suspend(struct device *dev)
3400{
3401 int ret = 0;
3402 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3403 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3404
3405 dev_dbg(dev, "dwc3-msm PM suspend\n");
3406 dbg_event(0xFF, "PM Sus", 0);
3407
3408 flush_workqueue(mdwc->dwc3_wq);
3409 if (!atomic_read(&dwc->in_lpm)) {
3410 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
3411 return -EBUSY;
3412 }
3413
3414 ret = dwc3_msm_suspend(mdwc);
3415 if (!ret)
3416 atomic_set(&mdwc->pm_suspended, 1);
3417
3418 return ret;
3419}
3420
3421static int dwc3_msm_pm_resume(struct device *dev)
3422{
3423 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3424
3425 dev_dbg(dev, "dwc3-msm PM resume\n");
3426
3427 dbg_event(0xFF, "PM Res", 0);
3428
3429 /* flush to avoid race in read/write of pm_suspended */
3430 flush_workqueue(mdwc->dwc3_wq);
3431 atomic_set(&mdwc->pm_suspended, 0);
3432
3433 /* kick in otg state machine */
3434 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
3435
3436 return 0;
3437}
3438#endif
3439
3440#ifdef CONFIG_PM
3441static int dwc3_msm_runtime_idle(struct device *dev)
3442{
3443 dev_dbg(dev, "DWC3-msm runtime idle\n");
3444 dbg_event(0xFF, "RT Idle", 0);
3445
3446 return 0;
3447}
3448
3449static int dwc3_msm_runtime_suspend(struct device *dev)
3450{
3451 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3452
3453 dev_dbg(dev, "DWC3-msm runtime suspend\n");
3454 dbg_event(0xFF, "RT Sus", 0);
3455
3456 return dwc3_msm_suspend(mdwc);
3457}
3458
3459static int dwc3_msm_runtime_resume(struct device *dev)
3460{
3461 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3462
3463 dev_dbg(dev, "DWC3-msm runtime resume\n");
3464 dbg_event(0xFF, "RT Res", 0);
3465
3466 return dwc3_msm_resume(mdwc);
3467}
3468#endif
3469
3470static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
3471 SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
3472 SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
3473 dwc3_msm_runtime_idle)
3474};
3475
3476static const struct of_device_id of_dwc3_matach[] = {
3477 {
3478 .compatible = "qcom,dwc-usb3-msm",
3479 },
3480 { },
3481};
3482MODULE_DEVICE_TABLE(of, of_dwc3_matach);
3483
3484static struct platform_driver dwc3_msm_driver = {
3485 .probe = dwc3_msm_probe,
3486 .remove = dwc3_msm_remove,
3487 .driver = {
3488 .name = "msm-dwc3",
3489 .pm = &dwc3_msm_dev_pm_ops,
3490 .of_match_table = of_dwc3_matach,
3491 },
3492};
3493
3494MODULE_LICENSE("GPL v2");
3495MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
3496
3497static int dwc3_msm_init(void)
3498{
3499 return platform_driver_register(&dwc3_msm_driver);
3500}
3501module_init(dwc3_msm_init);
3502
3503static void __exit dwc3_msm_exit(void)
3504{
3505 platform_driver_unregister(&dwc3_msm_driver);
3506}
3507module_exit(dwc3_msm_exit);