Hemant Kumar8e4c2f22017-01-24 18:13:07 -08001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Mayank Rana511f3b22016-08-02 12:00:11 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
Jack Phambbe27962017-03-23 18:42:26 -070024#include <asm/dma-iommu.h>
25#include <linux/iommu.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070026#include <linux/ioport.h>
27#include <linux/clk.h>
28#include <linux/io.h>
29#include <linux/module.h>
30#include <linux/types.h>
31#include <linux/delay.h>
32#include <linux/of.h>
33#include <linux/of_platform.h>
34#include <linux/of_gpio.h>
35#include <linux/list.h>
36#include <linux/uaccess.h>
37#include <linux/usb/ch9.h>
38#include <linux/usb/gadget.h>
39#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070040#include <linux/regulator/consumer.h>
41#include <linux/pm_wakeup.h>
42#include <linux/power_supply.h>
43#include <linux/cdev.h>
44#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070045#include <linux/msm-bus.h>
46#include <linux/irq.h>
47#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053048#include <linux/reset.h>
Hemant Kumar633dc332016-08-10 13:41:05 -070049#include <linux/clk/qcom.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070050
51#include "power.h"
52#include "core.h"
53#include "gadget.h"
54#include "dbm.h"
55#include "debug.h"
56#include "xhci.h"
57
Hemant Kumar006fae42017-07-12 18:11:25 -070058#define SDP_CONNETION_CHECK_TIME 10000 /* in ms */
59
Mayank Rana511f3b22016-08-02 12:00:11 -070060/* timeout to wait for USB cable status notification (in ms) */
61#define SM_INIT_TIMEOUT 30000
62
63/* AHB2PHY register offsets */
64#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
65
 66/* AHB2PHY read/write wait value */
67#define ONE_READ_WRITE_WAIT 0x11
68
 69/* cpu to pin the usb interrupt to */
70static int cpu_to_affin;
71module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
72MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
73
Mayank Ranaf70d8212017-06-12 14:02:07 -070074/* override for USB speed */
75static int override_usb_speed;
76module_param(override_usb_speed, int, 0644);
77MODULE_PARM_DESC(override_usb_speed, "override for USB speed");
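/*
 * Note (illustrative, not from the original source): both parameters above
 * are writable at runtime. Assuming the usual module naming for this driver
 * (dwc3_msm), they are expected to appear as
 * /sys/module/dwc3_msm/parameters/cpu_to_affin and
 * /sys/module/dwc3_msm/parameters/override_usb_speed.
 */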
78
Mayank Rana511f3b22016-08-02 12:00:11 -070079/* XHCI registers */
80#define USB3_HCSPARAMS1 (0x4)
81#define USB3_PORTSC (0x420)
82
83/**
84 * USB QSCRATCH Hardware registers
85 *
86 */
87#define QSCRATCH_REG_OFFSET (0x000F8800)
88#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
89#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
90#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
91#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
92
93#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
94#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
95#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
96#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
97#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
98
99/* QSCRATCH_GENERAL_CFG register bit offset */
100#define PIPE_UTMI_CLK_SEL BIT(0)
101#define PIPE3_PHYSTATUS_SW BIT(3)
102#define PIPE_UTMI_CLK_DIS BIT(8)
103
104#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
105#define UTMI_OTG_VBUS_VALID BIT(20)
106#define SW_SESSVLD_SEL BIT(28)
107
108#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
109#define LANE0_PWR_PRESENT BIT(24)
110
111/* GSI related registers */
112#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
113#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
114
115#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
116#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
117#define GSI_CLK_EN_MASK BIT(12)
118#define BLOCK_GSI_WR_GO_MASK BIT(1)
119#define GSI_EN_MASK BIT(0)
120
121#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
122#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
123#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
124#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
125
126#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
127#define GSI_WR_CTRL_STATE_MASK BIT(15)
128
Mayank Ranaf4918d32016-12-15 13:35:55 -0800129#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
130#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n) (n << 22)
131#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
132#define DWC3_GEVENT_TYPE_GSI 0x3
133
Mayank Rana511f3b22016-08-02 12:00:11 -0700134struct dwc3_msm_req_complete {
135 struct list_head list_item;
136 struct usb_request *req;
137 void (*orig_complete)(struct usb_ep *ep,
138 struct usb_request *req);
139};
140
141enum dwc3_id_state {
142 DWC3_ID_GROUND = 0,
143 DWC3_ID_FLOAT,
144};
145
146/* for type c cable */
147enum plug_orientation {
148 ORIENTATION_NONE,
149 ORIENTATION_CC1,
150 ORIENTATION_CC2,
151};
152
Mayank Ranad339abe2017-05-31 09:19:49 -0700153enum msm_usb_irq {
154 HS_PHY_IRQ,
155 PWR_EVNT_IRQ,
156 DP_HS_PHY_IRQ,
157 DM_HS_PHY_IRQ,
158 SS_PHY_IRQ,
159 USB_MAX_IRQ
160};
161
162struct usb_irq {
163 char *name;
164 int irq;
165 bool enable;
166};
167
168static const struct usb_irq usb_irq_info[USB_MAX_IRQ] = {
169 {"hs_phy_irq", 0},
170 {"pwr_event_irq", 0},
171 {"dp_hs_phy_irq", 0},
172 {"dm_hs_phy_irq", 0},
173 {"ss_phy_irq", 0},
174};
175
Mayank Rana511f3b22016-08-02 12:00:11 -0700176/* Input bits to state machine (mdwc->inputs) */
177
178#define ID 0
179#define B_SESS_VLD 1
180#define B_SUSPEND 2
181
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530182#define PM_QOS_SAMPLE_SEC 2
183#define PM_QOS_THRESHOLD 400
184
Mayank Rana511f3b22016-08-02 12:00:11 -0700185struct dwc3_msm {
186 struct device *dev;
187 void __iomem *base;
188 void __iomem *ahb2phy_base;
189 struct platform_device *dwc3;
Jack Phambbe27962017-03-23 18:42:26 -0700190 struct dma_iommu_mapping *iommu_map;
Mayank Rana511f3b22016-08-02 12:00:11 -0700191 const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
192 struct list_head req_complete_list;
193 struct clk *xo_clk;
194 struct clk *core_clk;
195 long core_clk_rate;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800196 long core_clk_rate_hs;
Mayank Rana511f3b22016-08-02 12:00:11 -0700197 struct clk *iface_clk;
198 struct clk *sleep_clk;
199 struct clk *utmi_clk;
200 unsigned int utmi_clk_rate;
201 struct clk *utmi_clk_src;
202 struct clk *bus_aggr_clk;
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +0530203 struct clk *noc_aggr_clk;
Mayank Rana511f3b22016-08-02 12:00:11 -0700204 struct clk *cfg_ahb_clk;
Amit Nischal4d278212016-06-06 17:54:34 +0530205 struct reset_control *core_reset;
Mayank Rana511f3b22016-08-02 12:00:11 -0700206 struct regulator *dwc3_gdsc;
207
208 struct usb_phy *hs_phy, *ss_phy;
209
210 struct dbm *dbm;
211
212 /* VBUS regulator for host mode */
213 struct regulator *vbus_reg;
214 int vbus_retry_count;
215 bool resume_pending;
216 atomic_t pm_suspended;
Mayank Ranad339abe2017-05-31 09:19:49 -0700217 struct usb_irq wakeup_irq[USB_MAX_IRQ];
Mayank Rana511f3b22016-08-02 12:00:11 -0700218 struct work_struct resume_work;
219 struct work_struct restart_usb_work;
220 bool in_restart;
221 struct workqueue_struct *dwc3_wq;
222 struct delayed_work sm_work;
223 unsigned long inputs;
224 unsigned int max_power;
225 bool charging_disabled;
226 enum usb_otg_state otg_state;
Mayank Rana511f3b22016-08-02 12:00:11 -0700227 u32 bus_perf_client;
228 struct msm_bus_scale_pdata *bus_scale_table;
229 struct power_supply *usb_psy;
Jack Pham4b8b4ae2016-08-09 11:36:34 -0700230 struct work_struct vbus_draw_work;
Mayank Rana511f3b22016-08-02 12:00:11 -0700231 bool in_host_mode;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800232 enum usb_device_speed max_rh_port_speed;
Mayank Rana511f3b22016-08-02 12:00:11 -0700233 unsigned int tx_fifo_size;
234 bool vbus_active;
235 bool suspend;
236 bool disable_host_mode_pm;
Mayank Ranad339abe2017-05-31 09:19:49 -0700237 bool use_pdc_interrupts;
Mayank Rana511f3b22016-08-02 12:00:11 -0700238 enum dwc3_id_state id_state;
239 unsigned long lpm_flags;
240#define MDWC3_SS_PHY_SUSPEND BIT(0)
241#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY BIT(1)
242#define MDWC3_POWER_COLLAPSE BIT(2)
243
244 unsigned int irq_to_affin;
245 struct notifier_block dwc3_cpu_notifier;
Manu Gautam976fdfc2016-08-18 09:27:35 +0530246 struct notifier_block usbdev_nb;
247 bool hc_died;
Mayank Rana511f3b22016-08-02 12:00:11 -0700248
249 struct extcon_dev *extcon_vbus;
250 struct extcon_dev *extcon_id;
Mayank Rana51958172017-02-28 14:49:21 -0800251 struct extcon_dev *extcon_eud;
Mayank Rana511f3b22016-08-02 12:00:11 -0700252 struct notifier_block vbus_nb;
253 struct notifier_block id_nb;
Mayank Rana51958172017-02-28 14:49:21 -0800254 struct notifier_block eud_event_nb;
Mayank Rana54d60432017-07-18 12:10:04 -0700255 struct notifier_block host_restart_nb;
Mayank Rana511f3b22016-08-02 12:00:11 -0700256
Jack Pham4d4e9342016-12-07 19:25:02 -0800257 struct notifier_block host_nb;
258
Mayank Rana511f3b22016-08-02 12:00:11 -0700259 atomic_t in_p3;
260 unsigned int lpm_to_suspend_delay;
261 bool init;
262 enum plug_orientation typec_orientation;
Mayank Ranaf4918d32016-12-15 13:35:55 -0800263 u32 num_gsi_event_buffers;
264 struct dwc3_event_buffer **gsi_ev_buff;
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530265 int pm_qos_latency;
266 struct pm_qos_request pm_qos_req_dma;
267 struct delayed_work perf_vote_work;
Hemant Kumar006fae42017-07-12 18:11:25 -0700268 struct delayed_work sdp_check;
Mayank Rana511f3b22016-08-02 12:00:11 -0700269};
270
271#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
272#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
273#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
274
275#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
276#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
277#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
278
279#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
280#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
281#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
282
283#define DSTS_CONNECTSPD_SS 0x4
284
285
286static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
287static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
Mayank Ranaf4918d32016-12-15 13:35:55 -0800288static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
Mayank Rana54d60432017-07-18 12:10:04 -0700289static int dwc3_restart_usb_host_mode(struct notifier_block *nb,
290 unsigned long event, void *ptr);
Mayank Ranaf70d8212017-06-12 14:02:07 -0700291
292static inline bool is_valid_usb_speed(struct dwc3 *dwc, int speed)
293{
294
295 return (((speed == USB_SPEED_FULL) || (speed == USB_SPEED_HIGH) ||
296 (speed == USB_SPEED_SUPER) || (speed == USB_SPEED_SUPER_PLUS))
297 && (speed <= dwc->maximum_speed));
298}
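/*
 * Worked example (illustrative, assuming the usual enum usb_device_speed
 * values from <linux/usb/ch9.h>: FULL=2, HIGH=3, SUPER=5, SUPER_PLUS=6):
 * override_usb_speed=3 (HIGH) passes this check on a SuperSpeed-capable
 * core, while 1 (LOW) or 4 (WIRELESS) is rejected even though it is
 * below dwc->maximum_speed.
 */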
299
Mayank Rana511f3b22016-08-02 12:00:11 -0700300/**
301 *
302 * Read register with debug info.
303 *
304 * @base - DWC3 base virtual address.
305 * @offset - register offset.
306 *
307 * @return u32
308 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700309static inline u32 dwc3_msm_read_reg(void __iomem *base, u32 offset)
Mayank Rana511f3b22016-08-02 12:00:11 -0700310{
311 u32 val = ioread32(base + offset);
312 return val;
313}
314
315/**
316 * Read register masked field with debug info.
317 *
318 * @base - DWC3 base virtual address.
319 * @offset - register offset.
320 * @mask - register bitmask.
321 *
322 * @return u32
323 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700324static inline u32 dwc3_msm_read_reg_field(void __iomem *base,
Mayank Rana511f3b22016-08-02 12:00:11 -0700325 u32 offset,
326 const u32 mask)
327{
Mayank Ranad796cab2017-07-11 15:34:12 -0700328 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700329 u32 val = ioread32(base + offset);
330
331 val &= mask; /* clear other bits */
332 val >>= shift;
333 return val;
334}
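/*
 * Usage sketch (illustrative only, not a call made at this point in the
 * driver): reading a single status bit as a 0/1 value, e.g.
 *
 *	in_l2 = dwc3_msm_read_reg_field(mdwc->base,
 *			PWR_EVNT_IRQ_STAT_REG, PWR_EVNT_LPM_IN_L2_MASK);
 *
 * __ffs(PWR_EVNT_LPM_IN_L2_MASK) is 4, so the masked bit is shifted down
 * and the helper returns 1 when the L2-entry event is latched.
 */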
335
336/**
337 *
338 * Write register with debug info.
339 *
340 * @base - DWC3 base virtual address.
341 * @offset - register offset.
342 * @val - value to write.
343 *
344 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700345static inline void dwc3_msm_write_reg(void __iomem *base, u32 offset, u32 val)
Mayank Rana511f3b22016-08-02 12:00:11 -0700346{
347 iowrite32(val, base + offset);
348}
349
350/**
351 * Write register masked field with debug info.
352 *
353 * @base - DWC3 base virtual address.
354 * @offset - register offset.
355 * @mask - register bitmask.
356 * @val - value to write.
357 *
358 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700359static inline void dwc3_msm_write_reg_field(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700360 const u32 mask, u32 val)
361{
Mayank Ranad796cab2017-07-11 15:34:12 -0700362 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700363 u32 tmp = ioread32(base + offset);
364
365 tmp &= ~mask; /* clear written bits */
366 val = tmp | (val << shift);
367 iowrite32(val, base + offset);
368}
369
370/**
371 * Write register and read back masked value to confirm it is written
372 *
373 * @base - DWC3 base virtual address.
374 * @offset - register offset.
375 * @mask - register bitmask specifying what should be updated
376 * @val - value to write.
377 *
378 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700379static inline void dwc3_msm_write_readback(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700380 const u32 mask, u32 val)
381{
382 u32 write_val, tmp = ioread32(base + offset);
383
384 tmp &= ~mask; /* retain other bits */
385 write_val = tmp | val;
386
387 iowrite32(write_val, base + offset);
388
389 /* Read back to see if val was written */
390 tmp = ioread32(base + offset);
391 tmp &= mask; /* clear other bits */
392
393 if (tmp != val)
394 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
395 __func__, val, offset);
396}
397
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800398static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
399{
400 int i, num_ports;
401 u32 reg;
402
403 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
404 num_ports = HCS_MAX_PORTS(reg);
405
406 for (i = 0; i < num_ports; i++) {
407 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
408 if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
409 return true;
410 }
411
412 return false;
413}
414
Mayank Rana511f3b22016-08-02 12:00:11 -0700415static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
416{
417 int i, num_ports;
418 u32 reg;
419
420 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
421 num_ports = HCS_MAX_PORTS(reg);
422
423 for (i = 0; i < num_ports; i++) {
424 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
425 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
426 return true;
427 }
428
429 return false;
430}
431
432static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
433{
434 u8 speed;
435
436 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
437 return !!(speed & DSTS_CONNECTSPD_SS);
438}
439
440static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
441{
442 if (mdwc->in_host_mode)
443 return dwc3_msm_is_host_superspeed(mdwc);
444
445 return dwc3_msm_is_dev_superspeed(mdwc);
446}
447
448#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
449/**
450 * Configure the DBM with the BAM's data fifo.
451 * This function is called by the USB BAM Driver
452 * upon initialization.
453 *
454 * @ep - pointer to usb endpoint.
455 * @addr - address of data fifo.
456 * @size - size of data fifo.
457 *
458 */
459int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
460 u32 size, u8 dst_pipe_idx)
461{
462 struct dwc3_ep *dep = to_dwc3_ep(ep);
463 struct dwc3 *dwc = dep->dwc;
464 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
465
466 dev_dbg(mdwc->dev, "%s\n", __func__);
467
468 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
469 dst_pipe_idx);
470}
471
472
473/**
 474* Clean up the msm endpoint state when a request completes.
 475*
 476* Also calls the original request completion callback.
 477*
 478* @usb_ep - pointer to usb_ep instance.
 479* @request - pointer to usb_request instance.
482*/
483static void dwc3_msm_req_complete_func(struct usb_ep *ep,
484 struct usb_request *request)
485{
486 struct dwc3_ep *dep = to_dwc3_ep(ep);
487 struct dwc3 *dwc = dep->dwc;
488 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
489 struct dwc3_msm_req_complete *req_complete = NULL;
490
491 /* Find original request complete function and remove it from list */
492 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
493 if (req_complete->req == request)
494 break;
495 }
496 if (!req_complete || req_complete->req != request) {
497 dev_err(dep->dwc->dev, "%s: could not find the request\n",
498 __func__);
499 return;
500 }
501 list_del(&req_complete->list_item);
502
503 /*
 504	 * Release one more TRB to the pool since the DBM queue took 2 TRBs
505 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
506 * released only one.
507 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700508 dep->trb_dequeue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700509
510 /* Unconfigure dbm ep */
511 dbm_ep_unconfig(mdwc->dbm, dep->number);
512
513 /*
 514	 * If this is the last endpoint we unconfigured, then also reset
515 * the event buffers; unless unconfiguring the ep due to lpm,
516 * in which case the event buffer only gets reset during the
517 * block reset.
518 */
519 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
520 !dbm_reset_ep_after_lpm(mdwc->dbm))
521 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
522
523 /*
524 * Call original complete function, notice that dwc->lock is already
525 * taken by the caller of this function (dwc3_gadget_giveback()).
526 */
527 request->complete = req_complete->orig_complete;
528 if (request->complete)
529 request->complete(ep, request);
530
531 kfree(req_complete);
532}
533
534
535/**
536* Helper function
537*
538* Reset DBM endpoint.
539*
540* @mdwc - pointer to dwc3_msm instance.
541* @dep - pointer to dwc3_ep instance.
542*
543* @return int - 0 on success, negative on error.
544*/
545static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
546{
547 int ret;
548
549 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
550
551 /* Reset the dbm endpoint */
552 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
553 if (ret) {
554 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
555 __func__);
556 return ret;
557 }
558
559 /*
560 * The necessary delay between asserting and deasserting the dbm ep
561 * reset is based on the number of active endpoints. If there is more
562 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
563 * delay will suffice.
564 */
565 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
566 usleep_range(1000, 1200);
567 else
568 udelay(10);
569 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
570 if (ret) {
571 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
572 __func__);
573 return ret;
574 }
575
576 return 0;
577}
578
579/**
580* Reset the DBM endpoint which is linked to the given USB endpoint.
581*
582* @usb_ep - pointer to usb_ep instance.
583*
584* @return int - 0 on success, negative on error.
585*/
586
587int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
588{
589 struct dwc3_ep *dep = to_dwc3_ep(ep);
590 struct dwc3 *dwc = dep->dwc;
591 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
592
593 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
594}
595EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
596
597
598/**
599* Helper function.
600* See the header of the dwc3_msm_ep_queue function.
601*
602* @dwc3_ep - pointer to dwc3_ep instance.
603* @req - pointer to dwc3_request instance.
604*
605* @return int - 0 on success, negative on error.
606*/
607static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
608{
609 struct dwc3_trb *trb;
610 struct dwc3_trb *trb_link;
611 struct dwc3_gadget_ep_cmd_params params;
612 u32 cmd;
613 int ret = 0;
614
Mayank Rana83ad5822016-08-09 14:17:22 -0700615	/* We push the request to the dep->started_list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700616	 * this request is issued with start transfer. The request will be
 617	 * removed from this list in 2 cases. The first is that the transfer
 618	 * is completed (though not if the transfer is endless, using circular
 619	 * TRBs with a link TRB). The second case is a stop transfer, which
 620	 * can be initiated by the function driver when calling dequeue.
621 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700622 req->started = true;
623 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700624
625 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana9ca186c2017-06-19 17:57:21 -0700626 trb = &dep->trb_pool[dep->trb_enqueue];
627 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700628 memset(trb, 0, sizeof(*trb));
629
630 req->trb = trb;
631 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
632 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
633 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
634 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
635 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
636
637 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana9ca186c2017-06-19 17:57:21 -0700638 trb_link = &dep->trb_pool[dep->trb_enqueue];
639 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700640 memset(trb_link, 0, sizeof(*trb_link));
641
642 trb_link->bpl = lower_32_bits(req->trb_dma);
643 trb_link->bph = DBM_TRB_BIT |
644 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
645 trb_link->size = 0;
646 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
647
648 /*
649 * Now start the transfer
650 */
651 memset(&params, 0, sizeof(params));
652 params.param0 = 0; /* TDAddr High */
653 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
654
655 /* DBM requires IOC to be set */
656 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700657 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700658 if (ret < 0) {
659 dev_dbg(dep->dwc->dev,
660 "%s: failed to send STARTTRANSFER command\n",
661 __func__);
662
663 list_del(&req->list);
664 return ret;
665 }
666 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700667 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700668
669 return ret;
670}
671
672/**
673* Queue a usb request to the DBM endpoint.
674* This function should be called after the endpoint
675* was enabled by the ep_enable.
676*
 677* This function prepares a special structure of TRBs which
 678* is understood by the DBM HW, so it is possible to use
 679* this endpoint in DBM mode.
 680*
 681* The TRBs prepared by this function are one normal TRB
 682* which points to a fake buffer, followed by a link TRB
 683* that points back to the first TRB.
 684*
 685* The API of this function follows the regular API of
 686* usb_ep_queue (see usb_ep_ops in include/linux/usb/gadget.h).
687*
688* @usb_ep - pointer to usb_ep instance.
689* @request - pointer to usb_request instance.
690* @gfp_flags - possible flags.
691*
692* @return int - 0 on success, negative on error.
693*/
694static int dwc3_msm_ep_queue(struct usb_ep *ep,
695 struct usb_request *request, gfp_t gfp_flags)
696{
697 struct dwc3_request *req = to_dwc3_request(request);
698 struct dwc3_ep *dep = to_dwc3_ep(ep);
699 struct dwc3 *dwc = dep->dwc;
700 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
701 struct dwc3_msm_req_complete *req_complete;
702 unsigned long flags;
703 int ret = 0, size;
704 u8 bam_pipe;
705 bool producer;
706 bool disable_wb;
707 bool internal_mem;
708 bool ioc;
709 bool superspeed;
710
711 if (!(request->udc_priv & MSM_SPS_MODE)) {
712 /* Not SPS mode, call original queue */
713 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
714 __func__);
715
716 return (mdwc->original_ep_ops[dep->number])->queue(ep,
717 request,
718 gfp_flags);
719 }
720
721 /* HW restriction regarding TRB size (8KB) */
722 if (req->request.length < 0x2000) {
723 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
724 return -EINVAL;
725 }
726
727 /*
728 * Override req->complete function, but before doing that,
 729	 * store its original pointer in the req_complete_list.
730 */
731 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
732 if (!req_complete)
733 return -ENOMEM;
734
735 req_complete->req = request;
736 req_complete->orig_complete = request->complete;
737 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
738 request->complete = dwc3_msm_req_complete_func;
739
740 /*
741 * Configure the DBM endpoint
742 */
743 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
744 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
745 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
746 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
747 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
748
749 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
750 disable_wb, internal_mem, ioc);
751 if (ret < 0) {
752 dev_err(mdwc->dev,
753 "error %d after calling dbm_ep_config\n", ret);
754 return ret;
755 }
756
 757	dev_vdbg(dwc->dev, "%s: queuing request %p to ep %s length %d\n",
758 __func__, request, ep->name, request->length);
759 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
760 dbm_event_buffer_config(mdwc->dbm,
761 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
762 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
763 DWC3_GEVNTSIZ_SIZE(size));
764
765 /*
 766	 * We must take the lock of the dwc3 core driver, with interrupts
 767	 * disabled, so we can be sure we are the only ones configuring the
 768	 * HW device core, and we should finish queuing the request as soon
 769	 * as possible so the lock is released quickly.
771 */
772 spin_lock_irqsave(&dwc->lock, flags);
773 if (!dep->endpoint.desc) {
774 dev_err(mdwc->dev,
775 "%s: trying to queue request %p to disabled ep %s\n",
776 __func__, request, ep->name);
777 ret = -EPERM;
778 goto err;
779 }
780
781 if (dep->number == 0 || dep->number == 1) {
782 dev_err(mdwc->dev,
783 "%s: trying to queue dbm request %p to control ep %s\n",
784 __func__, request, ep->name);
785 ret = -EPERM;
786 goto err;
787 }
788
789
Mayank Rana83ad5822016-08-09 14:17:22 -0700790 if (dep->trb_dequeue != dep->trb_enqueue ||
791 !list_empty(&dep->pending_list)
792 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700793 dev_err(mdwc->dev,
794 "%s: trying to queue dbm request %p tp ep %s\n",
795 __func__, request, ep->name);
796 ret = -EPERM;
797 goto err;
798 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700799 dep->trb_dequeue = 0;
800 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700801 }
802
803 ret = __dwc3_msm_ep_queue(dep, req);
804 if (ret < 0) {
805 dev_err(mdwc->dev,
806 "error %d after calling __dwc3_msm_ep_queue\n", ret);
807 goto err;
808 }
809
810 spin_unlock_irqrestore(&dwc->lock, flags);
811 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
812 dbm_set_speed(mdwc->dbm, (u8)superspeed);
813
814 return 0;
815
816err:
817 spin_unlock_irqrestore(&dwc->lock, flags);
818 kfree(req_complete);
819 return ret;
820}
821
822/*
823* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
824*
825* @usb_ep - pointer to usb_ep instance.
826*
827* @return int - XferRscIndex
828*/
829static inline int gsi_get_xfer_index(struct usb_ep *ep)
830{
831 struct dwc3_ep *dep = to_dwc3_ep(ep);
832
833 return dep->resource_index;
834}
835
836/*
837* Fills up the GSI channel information needed in call to IPA driver
838* for GSI channel creation.
839*
840* @usb_ep - pointer to usb_ep instance.
841* @ch_info - output parameter with requested channel info
842*/
843static void gsi_get_channel_info(struct usb_ep *ep,
844 struct gsi_channel_info *ch_info)
845{
846 struct dwc3_ep *dep = to_dwc3_ep(ep);
847 int last_trb_index = 0;
848 struct dwc3 *dwc = dep->dwc;
849 struct usb_gsi_request *request = ch_info->ch_req;
850
851 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
852 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
Mayank Ranaac776d12017-04-18 16:56:13 -0700853 DWC3_DEP_BASE(dep->number) + DWC3_DEPCMD);
854
Mayank Rana511f3b22016-08-02 12:00:11 -0700855 ch_info->depcmd_hi_addr = 0;
856
857 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
858 &dep->trb_pool[0]);
 859	/* Convert to multiples of 1KB */
860 ch_info->const_buffer_size = request->buf_len/1024;
861
862 /* IN direction */
863 if (dep->direction) {
864 /*
865 * Multiply by size of each TRB for xfer_ring_len in bytes.
866 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
867 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
868 */
869 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
870 last_trb_index = 2 * request->num_bufs + 2;
871 } else { /* OUT direction */
872 /*
873 * Multiply by size of each TRB for xfer_ring_len in bytes.
 874		 * n + 2 TRBs as per GSI h/w requirement. 1 LINK TRB + n Xfer
 875		 * TRBs + 1 LINK TRB.
876 */
Mayank Rana64d136b2016-11-01 21:01:34 -0700877 ch_info->xfer_ring_len = (request->num_bufs + 2) * 0x10;
878 last_trb_index = request->num_bufs + 2;
Mayank Rana511f3b22016-08-02 12:00:11 -0700879 }
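	/*
	 * Worked example (illustrative): for an IN endpoint with
	 * request->num_bufs = 8, xfer_ring_len = (2 * 8 + 2) * 0x10 = 288
	 * bytes and last_trb_index = 18, so last_trb_addr below reports the
	 * lower 16 bits of the DMA address of trb_pool[17], the ring's
	 * link TRB.
	 */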
880
881 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
882 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
883 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
884 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
885 DWC3_GEVNTCOUNT(ep->ep_intr_num));
886 ch_info->gevntcount_hi_addr = 0;
887
888 dev_dbg(dwc->dev,
889 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
890 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
891 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
892}
893
894/*
895* Perform StartXfer on GSI EP. Stores XferRscIndex.
896*
897* @usb_ep - pointer to usb_ep instance.
898*
899* @return int - 0 on success
900*/
901static int gsi_startxfer_for_ep(struct usb_ep *ep)
902{
903 int ret;
904 struct dwc3_gadget_ep_cmd_params params;
905 u32 cmd;
906 struct dwc3_ep *dep = to_dwc3_ep(ep);
907 struct dwc3 *dwc = dep->dwc;
908
909 memset(&params, 0, sizeof(params));
910 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
911 params.param0 |= (ep->ep_intr_num << 16);
912 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
913 &dep->trb_pool[0]));
914 cmd = DWC3_DEPCMD_STARTTRANSFER;
915 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700916 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700917
918 if (ret < 0)
919 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700920 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700921 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
922 return ret;
923}
924
925/*
926* Store Ring Base and Doorbell Address for GSI EP
927* for GSI channel creation.
928*
929* @usb_ep - pointer to usb_ep instance.
930* @dbl_addr - Doorbell address obtained from IPA driver
931*/
932static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
933{
934 struct dwc3_ep *dep = to_dwc3_ep(ep);
935 struct dwc3 *dwc = dep->dwc;
936 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
937 int n = ep->ep_intr_num - 1;
938
939 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
940 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
941 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
942
943 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
944 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
945 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
946 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
947}
948
949/*
Mayank Rana64d136b2016-11-01 21:01:34 -0700950* Rings Doorbell for GSI Channel
Mayank Rana511f3b22016-08-02 12:00:11 -0700951*
952* @usb_ep - pointer to usb_ep instance.
953* @request - pointer to GSI request. This is used to pass in the
954* address of the GSI doorbell obtained from IPA driver
955*/
Mayank Rana64d136b2016-11-01 21:01:34 -0700956static void gsi_ring_db(struct usb_ep *ep, struct usb_gsi_request *request)
Mayank Rana511f3b22016-08-02 12:00:11 -0700957{
958 void __iomem *gsi_dbl_address_lsb;
959 void __iomem *gsi_dbl_address_msb;
960 dma_addr_t offset;
961 u64 dbl_addr = *((u64 *)request->buf_base_addr);
962 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
963 u32 dbl_hi_addr = (dbl_addr >> 32);
Mayank Rana511f3b22016-08-02 12:00:11 -0700964 struct dwc3_ep *dep = to_dwc3_ep(ep);
965 struct dwc3 *dwc = dep->dwc;
966 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Rana64d136b2016-11-01 21:01:34 -0700967 int num_trbs = (dep->direction) ? (2 * (request->num_bufs) + 2)
968 : (request->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -0700969
970 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
971 dbl_lo_addr, sizeof(u32));
972 if (!gsi_dbl_address_lsb)
973 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
974
975 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
976 dbl_hi_addr, sizeof(u32));
977 if (!gsi_dbl_address_msb)
978 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
979
980 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
Mayank Rana64d136b2016-11-01 21:01:34 -0700981 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x) for ep:%s\n",
982 &offset, gsi_dbl_address_lsb, dbl_lo_addr, ep->name);
Mayank Rana511f3b22016-08-02 12:00:11 -0700983
984 writel_relaxed(offset, gsi_dbl_address_lsb);
985 writel_relaxed(0, gsi_dbl_address_msb);
986}
987
988/*
989* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
990*
991* @usb_ep - pointer to usb_ep instance.
992* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
993*
994* @return int - 0 on success
995*/
996static int gsi_updatexfer_for_ep(struct usb_ep *ep,
997 struct usb_gsi_request *request)
998{
999 int i;
1000 int ret;
1001 u32 cmd;
1002 int num_trbs = request->num_bufs + 1;
1003 struct dwc3_trb *trb;
1004 struct dwc3_gadget_ep_cmd_params params;
1005 struct dwc3_ep *dep = to_dwc3_ep(ep);
1006 struct dwc3 *dwc = dep->dwc;
1007
1008 for (i = 0; i < num_trbs - 1; i++) {
1009 trb = &dep->trb_pool[i];
1010 trb->ctrl |= DWC3_TRB_CTRL_HWO;
1011 }
1012
1013 memset(&params, 0, sizeof(params));
1014 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1015 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -07001016 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001017 dep->flags |= DWC3_EP_BUSY;
1018 if (ret < 0)
1019 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
1020 return ret;
1021}
1022
1023/*
1024* Perform EndXfer on particular GSI EP.
1025*
1026* @usb_ep - pointer to usb_ep instance.
1027*/
1028static void gsi_endxfer_for_ep(struct usb_ep *ep)
1029{
1030 struct dwc3_ep *dep = to_dwc3_ep(ep);
1031 struct dwc3 *dwc = dep->dwc;
1032
1033 dwc3_stop_active_transfer(dwc, dep->number, true);
1034}
1035
1036/*
1037* Allocates and configures TRBs for GSI EPs.
1038*
1039* @usb_ep - pointer to usb_ep instance.
1040* @request - pointer to GSI request.
1041*
1042* @return int - 0 on success
1043*/
1044static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
1045{
1046 int i = 0;
1047 dma_addr_t buffer_addr = req->dma;
1048 struct dwc3_ep *dep = to_dwc3_ep(ep);
1049 struct dwc3 *dwc = dep->dwc;
1050 struct dwc3_trb *trb;
1051 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
Mayank Rana64d136b2016-11-01 21:01:34 -07001052 : (req->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -07001053
Jack Phambbe27962017-03-23 18:42:26 -07001054 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->sysdev,
Mayank Rana511f3b22016-08-02 12:00:11 -07001055 num_trbs * sizeof(struct dwc3_trb),
1056 num_trbs * sizeof(struct dwc3_trb), 0);
1057 if (!dep->trb_dma_pool) {
1058 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
1059 dep->name);
1060 return -ENOMEM;
1061 }
1062
1063 dep->num_trbs = num_trbs;
1064
1065 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
1066 GFP_KERNEL, &dep->trb_pool_dma);
1067 if (!dep->trb_pool) {
1068 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
1069 dep->name);
1070 return -ENOMEM;
1071 }
1072
1073 /* IN direction */
1074 if (dep->direction) {
1075 for (i = 0; i < num_trbs ; i++) {
1076 trb = &dep->trb_pool[i];
1077 memset(trb, 0, sizeof(*trb));
1078 /* Set up first n+1 TRBs for ZLPs */
1079 if (i < (req->num_bufs + 1)) {
1080 trb->bpl = 0;
1081 trb->bph = 0;
1082 trb->size = 0;
1083 trb->ctrl = DWC3_TRBCTL_NORMAL
1084 | DWC3_TRB_CTRL_IOC;
1085 continue;
1086 }
1087
1088 /* Setup n TRBs pointing to valid buffers */
1089 trb->bpl = lower_32_bits(buffer_addr);
1090 trb->bph = 0;
1091 trb->size = 0;
1092 trb->ctrl = DWC3_TRBCTL_NORMAL
1093 | DWC3_TRB_CTRL_IOC;
1094 buffer_addr += req->buf_len;
1095
1096 /* Set up the Link TRB at the end */
1097 if (i == (num_trbs - 1)) {
1098 trb->bpl = dwc3_trb_dma_offset(dep,
1099 &dep->trb_pool[0]);
1100 trb->bph = (1 << 23) | (1 << 21)
1101 | (ep->ep_intr_num << 16);
1102 trb->size = 0;
1103 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1104 | DWC3_TRB_CTRL_HWO;
1105 }
1106 }
1107 } else { /* OUT direction */
1108
1109 for (i = 0; i < num_trbs ; i++) {
1110
1111 trb = &dep->trb_pool[i];
1112 memset(trb, 0, sizeof(*trb));
Mayank Rana64d136b2016-11-01 21:01:34 -07001113 /* Setup LINK TRB to start with TRB ring */
1114 if (i == 0) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001115 trb->bpl = dwc3_trb_dma_offset(dep,
Mayank Rana64d136b2016-11-01 21:01:34 -07001116 &dep->trb_pool[1]);
1117 trb->ctrl = DWC3_TRBCTL_LINK_TRB;
1118 } else if (i == (num_trbs - 1)) {
1119 /* Set up the Link TRB at the end */
1120 trb->bpl = dwc3_trb_dma_offset(dep,
1121 &dep->trb_pool[0]);
Mayank Rana511f3b22016-08-02 12:00:11 -07001122 trb->bph = (1 << 23) | (1 << 21)
1123 | (ep->ep_intr_num << 16);
Mayank Rana511f3b22016-08-02 12:00:11 -07001124 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1125 | DWC3_TRB_CTRL_HWO;
Mayank Rana64d136b2016-11-01 21:01:34 -07001126 } else {
1127 trb->bpl = lower_32_bits(buffer_addr);
1128 trb->size = req->buf_len;
1129 buffer_addr += req->buf_len;
1130 trb->ctrl = DWC3_TRBCTL_NORMAL
1131 | DWC3_TRB_CTRL_IOC
1132 | DWC3_TRB_CTRL_CSP
1133 | DWC3_TRB_CTRL_ISP_IMI;
Mayank Rana511f3b22016-08-02 12:00:11 -07001134 }
1135 }
1136 }
Mayank Rana64d136b2016-11-01 21:01:34 -07001137
1138 pr_debug("%s: Initialized TRB Ring for %s\n", __func__, dep->name);
1139 trb = &dep->trb_pool[0];
1140 if (trb) {
1141 for (i = 0; i < num_trbs; i++) {
1142 pr_debug("TRB(%d): ADDRESS:%lx bpl:%x bph:%x size:%x ctrl:%x\n",
1143 i, (unsigned long)dwc3_trb_dma_offset(dep,
1144 &dep->trb_pool[i]), trb->bpl, trb->bph,
1145 trb->size, trb->ctrl);
1146 trb++;
1147 }
1148 }
1149
Mayank Rana511f3b22016-08-02 12:00:11 -07001150 return 0;
1151}
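/*
 * Ring layouts built above (illustrative summary, with n = num_bufs):
 *
 * IN (device-to-host):  [n+1 ZLP TRBs][n data TRBs][1 link TRB]
 *                       = 2n + 2 TRBs, with the link TRB pointing back
 *                       to trb_pool[0].
 * OUT (host-to-device): [link TRB to trb_pool[1]][n data TRBs]
 *                       [link TRB to trb_pool[0]] = n + 2 TRBs.
 */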
1152
1153/*
1154* Frees TRBs for GSI EPs.
1155*
1156* @usb_ep - pointer to usb_ep instance.
1157*
1158*/
1159static void gsi_free_trbs(struct usb_ep *ep)
1160{
1161 struct dwc3_ep *dep = to_dwc3_ep(ep);
1162
1163 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1164 return;
1165
1166 /* Free TRBs and TRB pool for EP */
1167 if (dep->trb_dma_pool) {
1168 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1169 dep->trb_pool_dma);
1170 dma_pool_destroy(dep->trb_dma_pool);
1171 dep->trb_pool = NULL;
1172 dep->trb_pool_dma = 0;
1173 dep->trb_dma_pool = NULL;
1174 }
1175}
1176/*
1177* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1178*
1179* @usb_ep - pointer to usb_ep instance.
1180* @request - pointer to GSI request.
1181*/
1182static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
1183{
1184 struct dwc3_ep *dep = to_dwc3_ep(ep);
1185 struct dwc3 *dwc = dep->dwc;
1186 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1187 struct dwc3_gadget_ep_cmd_params params;
1188 const struct usb_endpoint_descriptor *desc = ep->desc;
1189 const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
Mayank Ranaac1200c2017-04-25 13:48:46 -07001190 u32 reg;
1191 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001192
1193 memset(&params, 0x00, sizeof(params));
1194
1195 /* Configure GSI EP */
1196 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
1197 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
1198
1199 /* Burst size is only needed in SuperSpeed mode */
1200 if (dwc->gadget.speed == USB_SPEED_SUPER) {
1201 u32 burst = dep->endpoint.maxburst - 1;
1202
1203 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
1204 }
1205
1206 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
1207 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
1208 | DWC3_DEPCFG_STREAM_EVENT_EN;
1209 dep->stream_capable = true;
1210 }
1211
1212 /* Set EP number */
1213 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
1214
1215 /* Set interrupter number for GSI endpoints */
1216 params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
1217
1218 /* Enable XferInProgress and XferComplete Interrupts */
1219 params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
1220 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
1221 params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
1222 /*
1223 * We must use the lower 16 TX FIFOs even though
1224 * HW might have more
1225 */
1226 /* Remove FIFO Number for GSI EP*/
1227 if (dep->direction)
1228 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
1229
1230 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
1231
1232 dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
1233 params.param0, params.param1, params.param2, dep->name);
1234
Mayank Rana83ad5822016-08-09 14:17:22 -07001235 dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001236
1237 /* Set XferRsc Index for GSI EP */
1238 if (!(dep->flags & DWC3_EP_ENABLED)) {
Mayank Ranaac1200c2017-04-25 13:48:46 -07001239 ret = dwc3_gadget_resize_tx_fifos(dwc, dep);
1240 if (ret)
1241 return;
1242
Mayank Rana511f3b22016-08-02 12:00:11 -07001243 memset(&params, 0x00, sizeof(params));
1244 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
Mayank Rana83ad5822016-08-09 14:17:22 -07001245 dwc3_send_gadget_ep_cmd(dep,
Mayank Rana511f3b22016-08-02 12:00:11 -07001246 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
1247
1248 dep->endpoint.desc = desc;
1249 dep->comp_desc = comp_desc;
1250 dep->type = usb_endpoint_type(desc);
1251 dep->flags |= DWC3_EP_ENABLED;
1252 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1253 reg |= DWC3_DALEPENA_EP(dep->number);
1254 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1255 }
1256
1257}
1258
1259/*
1260* Enables USB wrapper for GSI
1261*
1262* @usb_ep - pointer to usb_ep instance.
1263*/
1264static void gsi_enable(struct usb_ep *ep)
1265{
1266 struct dwc3_ep *dep = to_dwc3_ep(ep);
1267 struct dwc3 *dwc = dep->dwc;
1268 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1269
1270 dwc3_msm_write_reg_field(mdwc->base,
1271 GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
1272 dwc3_msm_write_reg_field(mdwc->base,
1273 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
1274 dwc3_msm_write_reg_field(mdwc->base,
1275 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
1276 dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
1277 dwc3_msm_write_reg_field(mdwc->base,
1278 GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
1279}
1280
1281/*
1282* Block or allow doorbell towards GSI
1283*
1284* @usb_ep - pointer to usb_ep instance.
 1285* @block_db - bool to set (block) or clear (allow) the doorbell bit
1287*/
1288static void gsi_set_clear_dbell(struct usb_ep *ep,
1289 bool block_db)
1290{
1291
1292 struct dwc3_ep *dep = to_dwc3_ep(ep);
1293 struct dwc3 *dwc = dep->dwc;
1294 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1295
1296 dwc3_msm_write_reg_field(mdwc->base,
1297 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1298}
1299
1300/*
1301* Performs necessary checks before stopping GSI channels
1302*
1303* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1304*/
1305static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
1306{
1307 u32 timeout = 1500;
1308 u32 reg = 0;
1309 struct dwc3_ep *dep = to_dwc3_ep(ep);
1310 struct dwc3 *dwc = dep->dwc;
1311 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1312
1313 while (dwc3_msm_read_reg_field(mdwc->base,
1314 GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
1315 if (!timeout--) {
1316 dev_err(mdwc->dev,
1317 "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
1318 return false;
1319 }
1320 }
1321 /* Check for U3 only if we are not handling Function Suspend */
1322 if (!f_suspend) {
1323 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1324 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
1325 dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
1326 return false;
1327 }
1328 }
1329
1330 return true;
1331}
1332
1333
1334/**
1335* Performs GSI operations or GSI EP related operations.
1336*
1337* @usb_ep - pointer to usb_ep instance.
1338* @op_data - pointer to opcode related data.
1339* @op - GSI related or GSI EP related op code.
1340*
1341* @return int - 0 on success, negative on error.
1342* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1343*/
1344static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1345 void *op_data, enum gsi_ep_op op)
1346{
1347 u32 ret = 0;
1348 struct dwc3_ep *dep = to_dwc3_ep(ep);
1349 struct dwc3 *dwc = dep->dwc;
1350 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1351 struct usb_gsi_request *request;
1352 struct gsi_channel_info *ch_info;
1353 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001354 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001355
1356 switch (op) {
1357 case GSI_EP_OP_PREPARE_TRBS:
1358 request = (struct usb_gsi_request *)op_data;
1359 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1360 ret = gsi_prepare_trbs(ep, request);
1361 break;
1362 case GSI_EP_OP_FREE_TRBS:
1363 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1364 gsi_free_trbs(ep);
1365 break;
1366 case GSI_EP_OP_CONFIG:
1367 request = (struct usb_gsi_request *)op_data;
1368 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001369 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001370 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001371 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001372 break;
1373 case GSI_EP_OP_STARTXFER:
1374 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001375 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001376 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001377 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001378 break;
1379 case GSI_EP_OP_GET_XFER_IDX:
1380 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1381 ret = gsi_get_xfer_index(ep);
1382 break;
1383 case GSI_EP_OP_STORE_DBL_INFO:
1384 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1385 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1386 break;
1387 case GSI_EP_OP_ENABLE_GSI:
1388 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1389 gsi_enable(ep);
1390 break;
1391 case GSI_EP_OP_GET_CH_INFO:
1392 ch_info = (struct gsi_channel_info *)op_data;
1393 gsi_get_channel_info(ep, ch_info);
1394 break;
Mayank Rana64d136b2016-11-01 21:01:34 -07001395 case GSI_EP_OP_RING_DB:
Mayank Rana511f3b22016-08-02 12:00:11 -07001396 request = (struct usb_gsi_request *)op_data;
Mayank Rana64d136b2016-11-01 21:01:34 -07001397 dbg_print(0xFF, "RING_DB", 0, ep->name);
1398 gsi_ring_db(ep, request);
Mayank Rana511f3b22016-08-02 12:00:11 -07001399 break;
1400 case GSI_EP_OP_UPDATEXFER:
1401 request = (struct usb_gsi_request *)op_data;
1402 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001403 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001404 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001405 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001406 break;
1407 case GSI_EP_OP_ENDXFER:
1408 request = (struct usb_gsi_request *)op_data;
1409 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001410 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001411 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001412 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001413 break;
1414 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1415 block_db = *((bool *)op_data);
1416 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1417 block_db);
1418 gsi_set_clear_dbell(ep, block_db);
1419 break;
1420 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1421 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1422 f_suspend = *((bool *)op_data);
1423 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1424 break;
1425 case GSI_EP_OP_DISABLE:
1426 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1427 ret = ep->ops->disable(ep);
1428 break;
1429 default:
1430 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1431 }
1432
1433 return ret;
1434}
1435
1436/**
1437 * Configure MSM endpoint.
 1438 * This function performs endpoint configurations that need an
 1439 * MSM-specific implementation.
 1440 *
 1441 * It should be called by a usb function/class layer which needs
 1442 * support from the MSM-specific HW wrapping the USB3 core
 1443 * (like GSI or DBM specific endpoints).
1445 *
1446 * @ep - a pointer to some usb_ep instance
1447 *
 1448 * @return int - 0 on success, negative on error.
1449 */
1450int msm_ep_config(struct usb_ep *ep)
1451{
1452 struct dwc3_ep *dep = to_dwc3_ep(ep);
1453 struct dwc3 *dwc = dep->dwc;
1454 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1455 struct usb_ep_ops *new_ep_ops;
1456
1457
1458 /* Save original ep ops for future restore*/
1459 if (mdwc->original_ep_ops[dep->number]) {
1460 dev_err(mdwc->dev,
1461 "ep [%s,%d] already configured as msm endpoint\n",
1462 ep->name, dep->number);
1463 return -EPERM;
1464 }
1465 mdwc->original_ep_ops[dep->number] = ep->ops;
1466
1467 /* Set new usb ops as we like */
1468 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1469 if (!new_ep_ops)
1470 return -ENOMEM;
1471
1472 (*new_ep_ops) = (*ep->ops);
1473 new_ep_ops->queue = dwc3_msm_ep_queue;
1474 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1475 ep->ops = new_ep_ops;
1476
1477 /*
1478 * Do HERE more usb endpoint configurations
1479 * which are specific to MSM.
1480 */
1481
1482 return 0;
1483}
1484EXPORT_SYMBOL(msm_ep_config);
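/*
 * Usage sketch (hypothetical function-driver code, illustrative only):
 *
 *	ret = msm_ep_config(ep);	(swaps in the MSM-specific ep ops)
 *	if (!ret) {
 *		req->udc_priv = MSM_SPS_MODE |
 *				(bam_pipe_idx & MSM_PIPE_ID_MASK);
 *		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
 *	}
 *
 * bam_pipe_idx here is a placeholder for the BAM pipe number provided
 * by the USB BAM driver; it is not a symbol defined in this file.
 */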
1485
1486/**
1487 * Un-configure MSM endpoint.
1488 * Tear down configurations done in the
 1489 * msm_ep_config function.
1490 *
1491 * @ep - a pointer to some usb_ep instance
1492 *
1493 * @return int - 0 on success, negative on error.
1494 */
1495int msm_ep_unconfig(struct usb_ep *ep)
1496{
1497 struct dwc3_ep *dep = to_dwc3_ep(ep);
1498 struct dwc3 *dwc = dep->dwc;
1499 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1500 struct usb_ep_ops *old_ep_ops;
1501
1502 /* Restore original ep ops */
1503 if (!mdwc->original_ep_ops[dep->number]) {
1504 dev_err(mdwc->dev,
1505 "ep [%s,%d] was not configured as msm endpoint\n",
1506 ep->name, dep->number);
1507 return -EINVAL;
1508 }
1509 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1510 ep->ops = mdwc->original_ep_ops[dep->number];
1511 mdwc->original_ep_ops[dep->number] = NULL;
1512 kfree(old_ep_ops);
1513
1514 /*
1515 * Do HERE more usb endpoint un-configurations
1516 * which are specific to MSM.
1517 */
1518
1519 return 0;
1520}
1521EXPORT_SYMBOL(msm_ep_unconfig);
1522#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1523
1524static void dwc3_resume_work(struct work_struct *w);
1525
1526static void dwc3_restart_usb_work(struct work_struct *w)
1527{
1528 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1529 restart_usb_work);
1530 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1531 unsigned int timeout = 50;
1532
1533 dev_dbg(mdwc->dev, "%s\n", __func__);
1534
1535 if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
1536 dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
1537 return;
1538 }
1539
1540 /* guard against concurrent VBUS handling */
1541 mdwc->in_restart = true;
1542
1543 if (!mdwc->vbus_active) {
1544 dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
1545 dwc->err_evt_seen = false;
1546 mdwc->in_restart = false;
1547 return;
1548 }
1549
Mayank Rana08e41922017-03-02 15:25:48 -08001550 dbg_event(0xFF, "RestartUSB", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07001551 /* Reset active USB connection */
1552 dwc3_resume_work(&mdwc->resume_work);
1553
1554 /* Make sure disconnect is processed before sending connect */
1555 while (--timeout && !pm_runtime_suspended(mdwc->dev))
1556 msleep(20);
1557
1558 if (!timeout) {
1559 dev_dbg(mdwc->dev,
1560 "Not in LPM after disconnect, forcing suspend...\n");
Mayank Rana08e41922017-03-02 15:25:48 -08001561 dbg_event(0xFF, "ReStart:RT SUSP",
1562 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07001563 pm_runtime_suspend(mdwc->dev);
1564 }
1565
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301566 mdwc->in_restart = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07001567 /* Force reconnect only if cable is still connected */
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301568 if (mdwc->vbus_active)
Mayank Rana511f3b22016-08-02 12:00:11 -07001569 dwc3_resume_work(&mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001570
1571 dwc->err_evt_seen = false;
1572 flush_delayed_work(&mdwc->sm_work);
1573}
1574
Manu Gautam976fdfc2016-08-18 09:27:35 +05301575static int msm_dwc3_usbdev_notify(struct notifier_block *self,
1576 unsigned long action, void *priv)
1577{
1578 struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
1579 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1580 struct usb_bus *bus = priv;
1581
1582 /* Interested only in recovery when HC dies */
1583 if (action != USB_BUS_DIED)
1584 return 0;
1585
1586 dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
1587 /* Recovery already under process */
1588 if (mdwc->hc_died)
1589 return 0;
1590
1591 if (bus->controller != &dwc->xhci->dev) {
1592 dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
1593 return 0;
1594 }
1595
1596 mdwc->hc_died = true;
1597 schedule_delayed_work(&mdwc->sm_work, 0);
1598 return 0;
1599}
1600
1601
Mayank Rana511f3b22016-08-02 12:00:11 -07001602/*
1603 * Check whether the DWC3 requires resetting the ep
1604 * after going to Low Power Mode (lpm)
1605 */
1606bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1607{
1608 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1609 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1610
1611 return dbm_reset_ep_after_lpm(mdwc->dbm);
1612}
1613EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1614
1615/*
1616 * Config Global Distributed Switch Controller (GDSC)
1617 * to support controller power collapse
1618 */
1619static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1620{
1621 int ret;
1622
1623 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1624 return -EPERM;
1625
1626 if (on) {
1627 ret = regulator_enable(mdwc->dwc3_gdsc);
1628 if (ret) {
1629 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1630 return ret;
1631 }
1632 } else {
1633 ret = regulator_disable(mdwc->dwc3_gdsc);
1634 if (ret) {
1635 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1636 return ret;
1637 }
1638 }
1639
1640 return ret;
1641}
1642
1643static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
1644{
1645 int ret = 0;
1646
1647 if (assert) {
Mayank Ranad339abe2017-05-31 09:19:49 -07001648 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001649 /* Using asynchronous block reset to the hardware */
1650 dev_dbg(mdwc->dev, "block_reset ASSERT\n");
1651 clk_disable_unprepare(mdwc->utmi_clk);
1652 clk_disable_unprepare(mdwc->sleep_clk);
1653 clk_disable_unprepare(mdwc->core_clk);
1654 clk_disable_unprepare(mdwc->iface_clk);
Amit Nischal4d278212016-06-06 17:54:34 +05301655 ret = reset_control_assert(mdwc->core_reset);
Mayank Rana511f3b22016-08-02 12:00:11 -07001656 if (ret)
Amit Nischal4d278212016-06-06 17:54:34 +05301657 dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001658 } else {
1659 dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
Amit Nischal4d278212016-06-06 17:54:34 +05301660 ret = reset_control_deassert(mdwc->core_reset);
1661 if (ret)
1662 dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001663 ndelay(200);
1664 clk_prepare_enable(mdwc->iface_clk);
1665 clk_prepare_enable(mdwc->core_clk);
1666 clk_prepare_enable(mdwc->sleep_clk);
1667 clk_prepare_enable(mdwc->utmi_clk);
Mayank Ranad339abe2017-05-31 09:19:49 -07001668 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001669 }
1670
1671 return ret;
1672}
1673
1674static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
1675{
1676 u32 guctl, gfladj = 0;
1677
1678 guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
1679 guctl &= ~DWC3_GUCTL_REFCLKPER;
1680
1681 /* GFLADJ register is used starting with revision 2.50a */
1682 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
1683 gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
1684 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1685 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
1686 gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
1687 gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
1688 }
1689
1690 /* Refer to SNPS Databook Table 6-55 for calculations used */
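/*
 * For reference (per the databook table above): GUCTL.REFCLKPER holds the
 * reference clock period in ns (19.2 MHz -> ~52 ns, 24 MHz -> ~41 ns) and
 * the GFLADJ 240MHZ_DECR field holds 240 MHz divided by the reference
 * frequency (240/19.2 = 12.5, i.e. 12 plus the PLS1 rounding bit;
 * 240/24 = 10).
 */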
1691 switch (mdwc->utmi_clk_rate) {
1692 case 19200000:
1693 guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
1694 gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1695 gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1696 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1697 gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1698 break;
1699 case 24000000:
1700 guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
1701 gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1702 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1703 gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1704 break;
1705 default:
1706 dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
1707 mdwc->utmi_clk_rate);
1708 break;
1709 }
1710
1711 dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
1712 if (gfladj)
1713 dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
1714}
1715
1716/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
1717static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
1718{
1719 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
1720 /* On older cores set XHCI_REV bit to specify revision 1.0 */
1721 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1722 BIT(2), 1);
1723
1724 /*
1725 * Enable master clock for RAMs to allow BAM to access RAMs when
1726 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
1727 * are seen where RAM clocks get turned OFF in SS mode
1728 */
1729 dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
1730 dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
1731
1732}
1733
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001734static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1735{
1736 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1737 vbus_draw_work);
1738 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1739
1740 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1741}
1742
Mayank Rana511f3b22016-08-02 12:00:11 -07001743static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1744{
1745 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Ranaf4918d32016-12-15 13:35:55 -08001746 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001747 u32 reg;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001748 int i;
Mayank Rana511f3b22016-08-02 12:00:11 -07001749
1750 switch (event) {
1751 case DWC3_CONTROLLER_ERROR_EVENT:
1752 dev_info(mdwc->dev,
1753 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1754 dwc->irq_cnt);
1755
1756 dwc3_gadget_disable_irq(dwc);
1757
1758 /* prevent core from generating interrupts until recovery */
1759 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1760 reg |= DWC3_GCTL_CORESOFTRESET;
1761 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1762
1763 /* restart USB which performs full reset and reconnect */
1764 schedule_work(&mdwc->restart_usb_work);
1765 break;
1766 case DWC3_CONTROLLER_RESET_EVENT:
1767 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1768 /* HS & SSPHYs get reset as part of core soft reset */
1769 dwc3_msm_qscratch_reg_init(mdwc);
1770 break;
1771 case DWC3_CONTROLLER_POST_RESET_EVENT:
1772 dev_dbg(mdwc->dev,
1773 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1774
1775 /*
1776 * The sequence below is used when the controller operates without
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301777 * an SSPHY, and only USB high/full speed is supported.
Mayank Rana511f3b22016-08-02 12:00:11 -07001778 */
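/*
 * Gate the PIPE/UTMI clock mux, switch it to the UTMI clock with the
 * PHYSTATUS software override asserted, then ungate the clock again.
 */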
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301779 if (dwc->maximum_speed == USB_SPEED_HIGH ||
1780 dwc->maximum_speed == USB_SPEED_FULL) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001781 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1782 dwc3_msm_read_reg(mdwc->base,
1783 QSCRATCH_GENERAL_CFG)
1784 | PIPE_UTMI_CLK_DIS);
1785
1786 usleep_range(2, 5);
1787
1788
1789 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1790 dwc3_msm_read_reg(mdwc->base,
1791 QSCRATCH_GENERAL_CFG)
1792 | PIPE_UTMI_CLK_SEL
1793 | PIPE3_PHYSTATUS_SW);
1794
1795 usleep_range(2, 5);
1796
1797 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1798 dwc3_msm_read_reg(mdwc->base,
1799 QSCRATCH_GENERAL_CFG)
1800 & ~PIPE_UTMI_CLK_DIS);
1801 }
1802
1803 dwc3_msm_update_ref_clk(mdwc);
1804 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1805 break;
1806 case DWC3_CONTROLLER_CONNDONE_EVENT:
1807 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1808 /*
1809 * Enable the LPM_OUT_L1 power event if the DBM indicates coming
1810 * out of L1 via interrupt
1811 */
1812 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1813 dwc3_msm_write_reg_field(mdwc->base,
1814 PWR_EVNT_IRQ_MASK_REG,
1815 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1816
1817 atomic_set(&dwc->in_lpm, 0);
1818 break;
1819 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1820 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1821 if (dwc->enable_bus_suspend) {
1822 mdwc->suspend = dwc->b_suspend;
1823 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1824 }
1825 break;
1826 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1827 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001828 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001829 break;
1830 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1831 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
Hemant Kumar43874172016-08-25 16:17:48 -07001832 schedule_work(&mdwc->restart_usb_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001833 break;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001834 case DWC3_GSI_EVT_BUF_ALLOC:
1835 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
1836
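/*
 * Allocate one additional event buffer per GSI interrupter; these are
 * programmed into DWC3_GEVNT*(1..n) below, leaving index 0 for the
 * core's primary event buffer.
 */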
1837 if (!mdwc->num_gsi_event_buffers)
1838 break;
1839
1840 mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
1841 sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
1842 GFP_KERNEL);
1843 if (!mdwc->gsi_ev_buff) {
1844 dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
1845 break;
1846 }
1847
1848 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1849
1850 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
1851 if (!evt)
1852 break;
1853 evt->dwc = dwc;
1854 evt->length = DWC3_EVENT_BUFFERS_SIZE;
1855 evt->buf = dma_alloc_coherent(dwc->dev,
1856 DWC3_EVENT_BUFFERS_SIZE,
1857 &evt->dma, GFP_KERNEL);
1858 if (!evt->buf) {
1859 dev_err(dwc->dev,
1860 "can't allocate gsi_evt_buf(%d)\n", i);
1861 break;
1862 }
1863 mdwc->gsi_ev_buff[i] = evt;
1864 }
1865 break;
1866 case DWC3_GSI_EVT_BUF_SETUP:
1867 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
1868 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1869 evt = mdwc->gsi_ev_buff[i];
1870 dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
1871 evt->buf, (unsigned long long) evt->dma,
1872 evt->length);
1873 memset(evt->buf, 0, evt->length);
1874 evt->lpos = 0;
1875 /*
1876 * Primary event buffer is programmed with registers
1877 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1878 * program USB GSI related event buffer with DWC3
1879 * controller.
1880 */
1881 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
1882 lower_32_bits(evt->dma));
1883 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
1884 DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
1885 DWC3_GEVENT_TYPE_GSI) |
1886 DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
1887 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1888 DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
1889 ((evt->length) & 0xffff));
1890 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1891 }
1892 break;
1893 case DWC3_GSI_EVT_BUF_CLEANUP:
1894 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
1895 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1896 evt = mdwc->gsi_ev_buff[i];
1897 evt->lpos = 0;
1898 /*
1899 * Primary event buffer is programmed with registers
1900 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1901 * program USB GSI related event buffer with DWC3
1902 * controller.
1903 */
1904 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
1905 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
1906 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1907 DWC3_GEVNTSIZ_INTMASK |
1908 DWC3_GEVNTSIZ_SIZE((i+1)));
1909 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1910 }
1911 break;
1912 case DWC3_GSI_EVT_BUF_FREE:
1913 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
1914 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1915 evt = mdwc->gsi_ev_buff[i];
1916 if (evt)
1917 dma_free_coherent(dwc->dev, evt->length,
1918 evt->buf, evt->dma);
1919 }
1920 break;
Mayank Rana511f3b22016-08-02 12:00:11 -07001921 default:
1922 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1923 break;
1924 }
1925}
1926
1927static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
1928{
1929 int ret = 0;
1930
1931 if (core_reset) {
1932 ret = dwc3_msm_link_clk_reset(mdwc, 1);
1933 if (ret)
1934 return;
1935
1936 usleep_range(1000, 1200);
1937 ret = dwc3_msm_link_clk_reset(mdwc, 0);
1938 if (ret)
1939 return;
1940
1941 usleep_range(10000, 12000);
1942 }
1943
1944 if (mdwc->dbm) {
1945 /* Reset the DBM */
1946 dbm_soft_reset(mdwc->dbm, 1);
1947 usleep_range(1000, 1200);
1948 dbm_soft_reset(mdwc->dbm, 0);
1949
1950 /*enable DBM*/
1951 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1952 DBM_EN_MASK, 0x1);
1953 dbm_enable(mdwc->dbm);
1954 }
1955}
1956
1957static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
1958{
1959 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1960 u32 val;
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301961 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001962
1963 /* Configure AHB2PHY for one wait state read/write */
1964 if (mdwc->ahb2phy_base) {
1965 clk_prepare_enable(mdwc->cfg_ahb_clk);
1966 val = readl_relaxed(mdwc->ahb2phy_base +
1967 PERIPH_SS_AHB2PHY_TOP_CFG);
1968 if (val != ONE_READ_WRITE_WAIT) {
1969 writel_relaxed(ONE_READ_WRITE_WAIT,
1970 mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
1971 /* complete above write before configuring USB PHY. */
1972 mb();
1973 }
1974 clk_disable_unprepare(mdwc->cfg_ahb_clk);
1975 }
1976
1977 if (!mdwc->init) {
Mayank Rana08e41922017-03-02 15:25:48 -08001978 dbg_event(0xFF, "dwc3 init",
1979 atomic_read(&mdwc->dev->power.usage_count));
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301980 ret = dwc3_core_pre_init(dwc);
1981 if (ret) {
1982 dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
1983 return;
1984 }
Mayank Rana511f3b22016-08-02 12:00:11 -07001985 mdwc->init = true;
1986 }
1987
1988 dwc3_core_init(dwc);
1989 /* Re-configure event buffers */
1990 dwc3_event_buffers_setup(dwc);
1991}
1992
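/*
 * Prepare the link and HS PHY for low power mode: verify the SS link is
 * already in P3 (when applicable), program the HS PHY for suspend and
 * wait for it to enter L2.
 */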
1993static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
1994{
1995 unsigned long timeout;
1996 u32 reg = 0;
1997
1998 if ((mdwc->in_host_mode || mdwc->vbus_active)
Vijayavardhan Vennapusa8cf91a62016-09-01 12:05:50 +05301999 && dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002000 if (!atomic_read(&mdwc->in_p3)) {
2001 dev_err(mdwc->dev, "Not in P3, aborting LPM sequence\n");
2002 return -EBUSY;
2003 }
2004 }
2005
2006 /* Clear previous L2 events */
2007 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2008 PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
2009
2010 /* Prepare HSPHY for suspend */
2011 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
2012 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2013 reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
2014
2015 /* Wait for PHY to go into L2 */
2016 timeout = jiffies + msecs_to_jiffies(5);
2017 while (!time_after(jiffies, timeout)) {
2018 reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2019 if (reg & PWR_EVNT_LPM_IN_L2_MASK)
2020 break;
2021 }
2022 if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
2023 dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
2024
2025 /* Clear L2 event bit */
2026 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2027 PWR_EVNT_LPM_IN_L2_MASK);
2028
2029 return 0;
2030}
2031
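/*
 * Latch the negotiated HS/FS/LS speed into the HS PHY flags so that the
 * wakeup interrupt polarity can be chosen accordingly on suspend.
 */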
Mayank Rana511f3b22016-08-02 12:00:11 -07002032static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
2033{
2034 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2035 int i, num_ports;
2036 u32 reg;
2037
2038 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2039 if (mdwc->in_host_mode) {
2040 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
2041 num_ports = HCS_MAX_PORTS(reg);
2042 for (i = 0; i < num_ports; i++) {
2043 reg = dwc3_msm_read_reg(mdwc->base,
2044 USB3_PORTSC + i*0x10);
2045 if (reg & PORT_PE) {
2046 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
2047 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2048 else if (DEV_LOWSPEED(reg))
2049 mdwc->hs_phy->flags |= PHY_LS_MODE;
2050 }
2051 }
2052 } else {
2053 if (dwc->gadget.speed == USB_SPEED_HIGH ||
2054 dwc->gadget.speed == USB_SPEED_FULL)
2055 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2056 else if (dwc->gadget.speed == USB_SPEED_LOW)
2057 mdwc->hs_phy->flags |= PHY_LS_MODE;
2058 }
2059}
2060
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302061static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
2062 bool perf_mode);
Mayank Rana511f3b22016-08-02 12:00:11 -07002063
Mayank Ranad339abe2017-05-31 09:19:49 -07002064static void configure_usb_wakeup_interrupt(struct dwc3_msm *mdwc,
2065 struct usb_irq *uirq, unsigned int polarity, bool enable)
2066{
2067 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2068
2069 if (uirq && enable && !uirq->enable) {
2070 dbg_event(0xFF, "PDC_IRQ_EN", uirq->irq);
2071 dbg_event(0xFF, "PDC_IRQ_POL", polarity);
2072 /* clear any pending interrupt */
2073 irq_set_irqchip_state(uirq->irq, IRQCHIP_STATE_PENDING, 0);
2074 irq_set_irq_type(uirq->irq, polarity);
2075 enable_irq_wake(uirq->irq);
2076 enable_irq(uirq->irq);
2077 uirq->enable = true;
2078 }
2079
2080 if (uirq && !enable && uirq->enable) {
2081 dbg_event(0xFF, "PDC_IRQ_DIS", uirq->irq);
2082 disable_irq_wake(uirq->irq);
2083 disable_irq_nosync(uirq->irq);
2084 uirq->enable = false;
2085 }
2086}
2087
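/*
 * Pick the DP/DM wakeup interrupt polarity from the idle line state:
 * in LS mode DM idles high and in FS/HS mode DP idles high, so a falling
 * edge signals bus activity; with no session both lines idle low and a
 * rising edge on either line is treated as a wakeup.
 */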
2088static void enable_usb_pdc_interrupt(struct dwc3_msm *mdwc, bool enable)
2089{
2090 if (!enable)
2091 goto disable_usb_irq;
2092
2093 if (mdwc->hs_phy->flags & PHY_LS_MODE) {
2094 configure_usb_wakeup_interrupt(mdwc,
2095 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2096 IRQ_TYPE_EDGE_FALLING, enable);
2097 } else if (mdwc->hs_phy->flags & PHY_HSFS_MODE) {
2098 configure_usb_wakeup_interrupt(mdwc,
2099 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2100 IRQ_TYPE_EDGE_FALLING, enable);
2101 } else {
2102 configure_usb_wakeup_interrupt(mdwc,
2103 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2104 IRQ_TYPE_EDGE_RISING, true);
2105 configure_usb_wakeup_interrupt(mdwc,
2106 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2107 IRQ_TYPE_EDGE_RISING, true);
2108 }
2109
2110 configure_usb_wakeup_interrupt(mdwc,
2111 &mdwc->wakeup_irq[SS_PHY_IRQ],
2112 IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH, enable);
2113 return;
2114
2115disable_usb_irq:
2116 configure_usb_wakeup_interrupt(mdwc,
2117 &mdwc->wakeup_irq[DP_HS_PHY_IRQ], 0, enable);
2118 configure_usb_wakeup_interrupt(mdwc,
2119 &mdwc->wakeup_irq[DM_HS_PHY_IRQ], 0, enable);
2120 configure_usb_wakeup_interrupt(mdwc,
2121 &mdwc->wakeup_irq[SS_PHY_IRQ], 0, enable);
2122}
2123
2124static void configure_nonpdc_usb_interrupt(struct dwc3_msm *mdwc,
2125 struct usb_irq *uirq, bool enable)
2126{
2127 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2128
2129 if (uirq && enable && !uirq->enable) {
2130 dbg_event(0xFF, "IRQ_EN", uirq->irq);
2131 enable_irq_wake(uirq->irq);
2132 enable_irq(uirq->irq);
2133 uirq->enable = true;
2134 }
2135
2136 if (uirq && !enable && uirq->enable) {
2137 dbg_event(0xFF, "IRQ_DIS", uirq->irq);
2138 disable_irq_wake(uirq->irq);
2139 disable_irq_nosync(uirq->irq);
2140 uirq->enable = false;
2141 }
2142}
2143
Mayank Rana511f3b22016-08-02 12:00:11 -07002144static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
2145{
Mayank Rana83ad5822016-08-09 14:17:22 -07002146 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07002147 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana83ad5822016-08-09 14:17:22 -07002148 struct dwc3_event_buffer *evt;
Mayank Ranad339abe2017-05-31 09:19:49 -07002149 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002150
2151 if (atomic_read(&dwc->in_lpm)) {
2152 dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
2153 return 0;
2154 }
2155
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302156 cancel_delayed_work_sync(&mdwc->perf_vote_work);
2157 msm_dwc3_perf_vote_update(mdwc, false);
2158
Mayank Rana511f3b22016-08-02 12:00:11 -07002159 if (!mdwc->in_host_mode) {
Mayank Rana83ad5822016-08-09 14:17:22 -07002160 evt = dwc->ev_buf;
2161 if ((evt->flags & DWC3_EVENT_PENDING)) {
2162 dev_dbg(mdwc->dev,
Mayank Rana511f3b22016-08-02 12:00:11 -07002163 "%s: %d device events pending, abort suspend\n",
2164 __func__, evt->count / 4);
Mayank Rana83ad5822016-08-09 14:17:22 -07002165 return -EBUSY;
Mayank Rana511f3b22016-08-02 12:00:11 -07002166 }
2167 }
2168
2169 if (!mdwc->vbus_active && dwc->is_drd &&
2170 mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
2171 /*
2172 * In some cases, the pm_runtime_suspend may be called by
2173 * usb_bam when there is a pending lpm flag. However, if this
2174 * happens when the cable was disconnected and the otg state has
2175 * not yet changed to IDLE, the OTG state machine is still
2176 * running and we would race against it. So cancel LPM for now;
2177 * the OTG state machine will go for LPM later, after completing
2178 * the transition to the IDLE state.
2179 */
2180 dev_dbg(mdwc->dev,
2181 "%s: cable disconnected while not in idle otg state\n",
2182 __func__);
2183 return -EBUSY;
2184 }
2185
2186 /*
2187 * If the device is not in the CONFIGURED state during a device
2188 * bus suspend, break the LPM sequence instead of letting the
2189 * controller enter L2.
2190 */
2191 if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
2192 (dwc->gadget.state != USB_STATE_CONFIGURED)) {
2193 pr_err("%s(): Trying to go in LPM with state:%d\n",
2194 __func__, dwc->gadget.state);
2195 pr_err("%s(): LPM is not performed.\n", __func__);
2196 return -EBUSY;
2197 }
2198
2199 ret = dwc3_msm_prepare_suspend(mdwc);
2200 if (ret)
2201 return ret;
2202
Mayank Rana511f3b22016-08-02 12:00:11 -07002203 /* Disable core irq */
2204 if (dwc->irq)
2205 disable_irq(dwc->irq);
2206
Mayank Ranaf616a7f2017-03-20 16:10:39 -07002207 if (work_busy(&dwc->bh_work))
2208 dbg_event(0xFF, "pend evt", 0);
2209
Mayank Rana511f3b22016-08-02 12:00:11 -07002210 /* disable power event irq, hs and ss phy irq is used as wake up src */
Mayank Ranad339abe2017-05-31 09:19:49 -07002211 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07002212
2213 dwc3_set_phy_speed_flags(mdwc);
2214 /* Suspend HS PHY */
2215 usb_phy_set_suspend(mdwc->hs_phy, 1);
2216
2217 /* Suspend SS PHY */
Mayank Rana17f67e32017-08-15 10:41:28 -07002218 if (dwc->maximum_speed == USB_SPEED_SUPER) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002219 /* indicate phy about SS mode */
2220 if (dwc3_msm_is_superspeed(mdwc))
2221 mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
2222 usb_phy_set_suspend(mdwc->ss_phy, 1);
2223 mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
2224 }
2225
2226 /* make sure above writes are completed before turning off clocks */
2227 wmb();
2228
2229 /* Disable clocks */
2230 if (mdwc->bus_aggr_clk)
2231 clk_disable_unprepare(mdwc->bus_aggr_clk);
2232 clk_disable_unprepare(mdwc->utmi_clk);
2233
Hemant Kumar633dc332016-08-10 13:41:05 -07002234 /* Memory core: OFF, Memory periphery: OFF */
2235 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
2236 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
2237 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
2238 }
2239
Mayank Rana511f3b22016-08-02 12:00:11 -07002240 clk_set_rate(mdwc->core_clk, 19200000);
2241 clk_disable_unprepare(mdwc->core_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302242 if (mdwc->noc_aggr_clk)
2243 clk_disable_unprepare(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07002244 /*
2245 * Disable iface_clk only after core_clk as core_clk has FSM
2246 * dependency on iface_clk. Hence iface_clk should be turned off
2247 * after core_clk is turned off.
2248 */
2249 clk_disable_unprepare(mdwc->iface_clk);
2250 /* USB PHY no more requires TCXO */
2251 clk_disable_unprepare(mdwc->xo_clk);
2252
2253 /* Perform controller power collapse */
Azhar Shaikh69f4c052016-02-11 11:00:58 -08002254 if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002255 mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
2256 dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
2257 dwc3_msm_config_gdsc(mdwc, 0);
2258 clk_disable_unprepare(mdwc->sleep_clk);
Jack Phambbe27962017-03-23 18:42:26 -07002259
Jack Pham9faa51df2017-04-03 18:13:40 -07002260 if (mdwc->iommu_map) {
Jack Phambbe27962017-03-23 18:42:26 -07002261 arm_iommu_detach_device(mdwc->dev);
Jack Pham9faa51df2017-04-03 18:13:40 -07002262 dev_dbg(mdwc->dev, "IOMMU detached\n");
2263 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002264 }
2265
2266 /* Remove bus voting */
2267 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002268 dbg_event(0xFF, "bus_devote_start", 0);
2269 ret = msm_bus_scale_client_update_request(
2270 mdwc->bus_perf_client, 0);
2271 dbg_event(0xFF, "bus_devote_finish", 0);
2272 if (ret)
2273 dev_err(mdwc->dev, "bus bw unvoting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002274 }
2275
2276 /*
2277 * Release the wakeup source with a timeout to defer system suspend,
2278 * handling the case where both SUSPEND and DISCONNECT events are
2279 * received on a USB cable disconnect.
2280 */
2281 if (mdwc->lpm_to_suspend_delay) {
2282 dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
2283 mdwc->lpm_to_suspend_delay);
2284 pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
2285 } else {
2286 pm_relax(mdwc->dev);
2287 }
2288
2289 atomic_set(&dwc->in_lpm, 1);
2290
2291 /*
2292 * With DCP or during cable disconnect, we don't require wakeup
2293 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
2294 * the host bus suspend and device bus suspend cases.
2295 */
2296 if (mdwc->vbus_active || mdwc->in_host_mode) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002297 if (mdwc->use_pdc_interrupts) {
2298 enable_usb_pdc_interrupt(mdwc, true);
2299 } else {
2300 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2301 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
2302 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2303 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
Mayank Rana511f3b22016-08-02 12:00:11 -07002304 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002305 mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2306 }
2307
2308 dev_info(mdwc->dev, "DWC3 in low power mode\n");
2309 return 0;
2310}
2311
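/*
 * Bring the controller out of low power mode: restore bus bandwidth votes
 * and clocks, undo power collapse if it was done, resume the PHYs and
 * re-enable the core and power event interrupts.
 */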
2312static int dwc3_msm_resume(struct dwc3_msm *mdwc)
2313{
2314 int ret;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002315 long core_clk_rate;
Mayank Rana511f3b22016-08-02 12:00:11 -07002316 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Ranad339abe2017-05-31 09:19:49 -07002317 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002318
2319 dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
2320
2321 if (!atomic_read(&dwc->in_lpm)) {
2322 dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
2323 return 0;
2324 }
2325
2326 pm_stay_awake(mdwc->dev);
2327
2328 /* Enable bus voting */
2329 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002330 dbg_event(0xFF, "bus_vote_start", 1);
2331 ret = msm_bus_scale_client_update_request(
2332 mdwc->bus_perf_client, 1);
2333 dbg_event(0xFF, "bus_vote_finish", 1);
2334 if (ret)
2335 dev_err(mdwc->dev, "bus bw voting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002336 }
2337
2338 /* Vote for TCXO while waking up USB HSPHY */
2339 ret = clk_prepare_enable(mdwc->xo_clk);
2340 if (ret)
2341 dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
2342 __func__, ret);
2343
2344 /* Restore controller power collapse */
2345 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2346 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2347 dwc3_msm_config_gdsc(mdwc, 1);
Amit Nischal4d278212016-06-06 17:54:34 +05302348 ret = reset_control_assert(mdwc->core_reset);
2349 if (ret)
2350 dev_err(mdwc->dev, "%s:core_reset assert failed\n",
2351 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002352 /* HW requires a short delay for reset to take place properly */
2353 usleep_range(1000, 1200);
Amit Nischal4d278212016-06-06 17:54:34 +05302354 ret = reset_control_deassert(mdwc->core_reset);
2355 if (ret)
2356 dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
2357 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002358 clk_prepare_enable(mdwc->sleep_clk);
2359 }
2360
2361 /*
2362 * Enable clocks
2363 * Turn ON iface_clk before core_clk due to FSM dependency.
2364 */
2365 clk_prepare_enable(mdwc->iface_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302366 if (mdwc->noc_aggr_clk)
2367 clk_prepare_enable(mdwc->noc_aggr_clk);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002368
2369 core_clk_rate = mdwc->core_clk_rate;
2370 if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
2371 core_clk_rate = mdwc->core_clk_rate_hs;
2372 dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
2373 core_clk_rate);
2374 }
2375
2376 clk_set_rate(mdwc->core_clk, core_clk_rate);
Mayank Rana511f3b22016-08-02 12:00:11 -07002377 clk_prepare_enable(mdwc->core_clk);
Hemant Kumar5fa38932016-10-27 11:58:37 -07002378
2379 /* set Memory core: ON, Memory periphery: ON */
2380 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
2381 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);
2382
Mayank Rana511f3b22016-08-02 12:00:11 -07002383 clk_prepare_enable(mdwc->utmi_clk);
2384 if (mdwc->bus_aggr_clk)
2385 clk_prepare_enable(mdwc->bus_aggr_clk);
2386
2387 /* Resume SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002388 if (dwc->maximum_speed == USB_SPEED_SUPER &&
2389 mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002390 mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
2391 if (mdwc->typec_orientation == ORIENTATION_CC1)
2392 mdwc->ss_phy->flags |= PHY_LANE_A;
2393 if (mdwc->typec_orientation == ORIENTATION_CC2)
2394 mdwc->ss_phy->flags |= PHY_LANE_B;
2395 usb_phy_set_suspend(mdwc->ss_phy, 0);
2396 mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
2397 mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
2398 }
2399
2400 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2401 /* Resume HS PHY */
2402 usb_phy_set_suspend(mdwc->hs_phy, 0);
2403
2404 /* Recover from controller power collapse */
2405 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2406 u32 tmp;
2407
Jack Pham9faa51df2017-04-03 18:13:40 -07002408 if (mdwc->iommu_map) {
2409 ret = arm_iommu_attach_device(mdwc->dev,
2410 mdwc->iommu_map);
2411 if (ret)
2412 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
2413 ret);
2414 else
2415 dev_dbg(mdwc->dev, "attached to IOMMU\n");
2416 }
2417
Mayank Rana511f3b22016-08-02 12:00:11 -07002418 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2419
2420 dwc3_msm_power_collapse_por(mdwc);
2421
2422 /* Get initial P3 status and enable IN_P3 event */
2423 tmp = dwc3_msm_read_reg_field(mdwc->base,
2424 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2425 atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
2426 dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
2427 PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
2428
2429 mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
2430 }
2431
2432 atomic_set(&dwc->in_lpm, 0);
2433
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302434 /* enable power evt irq for IN P3 detection */
Mayank Ranad339abe2017-05-31 09:19:49 -07002435 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302436
Mayank Rana511f3b22016-08-02 12:00:11 -07002437 /* Disable HSPHY auto suspend */
2438 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2439 dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
2440 ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
2441 DWC3_GUSB2PHYCFG_SUSPHY));
2442
2443 /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
2444 if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002445 if (mdwc->use_pdc_interrupts) {
2446 enable_usb_pdc_interrupt(mdwc, false);
2447 } else {
2448 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2449 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
2450 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2451 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07002452 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002453 mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2454 }
2455
2456 dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
2457
Mayank Rana511f3b22016-08-02 12:00:11 -07002458 /* Enable core irq */
2459 if (dwc->irq)
2460 enable_irq(dwc->irq);
2461
2462 /*
2463 * Handle other power events that could not have been handled during
2464 * Low Power Mode
2465 */
2466 dwc3_pwr_event_handler(mdwc);
2467
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302468 if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
2469 schedule_delayed_work(&mdwc->perf_vote_work,
2470 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
2471
Mayank Rana08e41922017-03-02 15:25:48 -08002472 dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002473 return 0;
2474}
2475
2476/**
2477 * dwc3_ext_event_notify - callback to handle events from external transceiver
2478 *
2479 * Returns 0 on success
2480 */
2481static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2482{
2483 /* Flush processing any pending events before handling new ones */
2484 flush_delayed_work(&mdwc->sm_work);
2485
2486 if (mdwc->id_state == DWC3_ID_FLOAT) {
2487 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2488 set_bit(ID, &mdwc->inputs);
2489 } else {
2490 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2491 clear_bit(ID, &mdwc->inputs);
2492 }
2493
2494 if (mdwc->vbus_active && !mdwc->in_restart) {
2495 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2496 set_bit(B_SESS_VLD, &mdwc->inputs);
2497 } else {
2498 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2499 clear_bit(B_SESS_VLD, &mdwc->inputs);
2500 }
2501
2502 if (mdwc->suspend) {
2503 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2504 set_bit(B_SUSPEND, &mdwc->inputs);
2505 } else {
2506 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2507 clear_bit(B_SUSPEND, &mdwc->inputs);
2508 }
2509
2510 schedule_delayed_work(&mdwc->sm_work, 0);
2511}
2512
2513static void dwc3_resume_work(struct work_struct *w)
2514{
2515 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana08e41922017-03-02 15:25:48 -08002516 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Jack Pham4e9dff72017-04-04 18:05:53 -07002517 union extcon_property_value val;
2518 unsigned int extcon_id;
2519 struct extcon_dev *edev = NULL;
2520 int ret = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07002521
2522 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2523
Jack Pham4e9dff72017-04-04 18:05:53 -07002524 if (mdwc->vbus_active) {
2525 edev = mdwc->extcon_vbus;
2526 extcon_id = EXTCON_USB;
2527 } else if (mdwc->id_state == DWC3_ID_GROUND) {
2528 edev = mdwc->extcon_id;
2529 extcon_id = EXTCON_USB_HOST;
2530 }
2531
2532 /* Check speed and Type-C polarity values in order to configure PHY */
2533 if (edev && extcon_get_state(edev, extcon_id)) {
2534 ret = extcon_get_property(edev, extcon_id,
2535 EXTCON_PROP_USB_SS, &val);
2536
2537 /* Use default dwc->maximum_speed if speed isn't reported */
2538 if (!ret)
2539 dwc->maximum_speed = (val.intval == 0) ?
2540 USB_SPEED_HIGH : USB_SPEED_SUPER;
2541
2542 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2543 dwc->maximum_speed = dwc->max_hw_supp_speed;
2544
Mayank Ranaf70d8212017-06-12 14:02:07 -07002545 if (override_usb_speed &&
2546 is_valid_usb_speed(dwc, override_usb_speed)) {
2547 dwc->maximum_speed = override_usb_speed;
2548 dbg_event(0xFF, "override_speed", override_usb_speed);
2549 }
2550
Jack Pham4e9dff72017-04-04 18:05:53 -07002551 dbg_event(0xFF, "speed", dwc->maximum_speed);
2552
2553 ret = extcon_get_property(edev, extcon_id,
2554 EXTCON_PROP_USB_TYPEC_POLARITY, &val);
2555 if (ret)
2556 mdwc->typec_orientation = ORIENTATION_NONE;
2557 else
2558 mdwc->typec_orientation = val.intval ?
2559 ORIENTATION_CC2 : ORIENTATION_CC1;
2560
2561 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
2562 }
2563
Mayank Rana511f3b22016-08-02 12:00:11 -07002564 /*
2565 * Exit LPM first to meet the resume timeline from the device side.
2566 * Checking the resume_pending flag prevents calling
2567 * dwc3_msm_resume() when we get here due to a system-wide
2568 * resume with no usb cable connected; the flag is set
2569 * only by the power event irq while in lpm.
2570 */
2571 if (mdwc->resume_pending) {
2572 dwc3_msm_resume(mdwc);
2573 mdwc->resume_pending = false;
2574 }
2575
Mayank Rana08e41922017-03-02 15:25:48 -08002576 if (atomic_read(&mdwc->pm_suspended)) {
2577 dbg_event(0xFF, "RWrk PMSus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07002578 /* let pm resume kick in resume work later */
2579 return;
Mayank Rana08e41922017-03-02 15:25:48 -08002580 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002581 dwc3_ext_event_notify(mdwc);
2582}
2583
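/*
 * Handle power events latched in PWR_EVNT_IRQ_STAT_REG (P3 entry/exit,
 * L1/L2 exit) and write back the handled bits to clear them.
 */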
2584static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2585{
2586 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2587 u32 irq_stat, irq_clear = 0;
2588
2589 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2590 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2591
2592 /* Check for P3 events */
2593 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2594 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2595 /* Can't tell if entered or exit P3, so check LINKSTATE */
2596 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2597 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2598 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2599 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2600
2601 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2602 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2603 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2604 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2605 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2606 atomic_set(&mdwc->in_p3, 0);
2607 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2608 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2609 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2610 atomic_set(&mdwc->in_p3, 1);
2611 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2612 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2613 }
2614
2615 /* Clear L2 exit */
2616 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2617 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2618 irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
2619 }
2620
2621 /* Handle exit from L1 events */
2622 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2623 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2624 __func__);
2625 if (usb_gadget_wakeup(&dwc->gadget))
2626 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2627 __func__);
2628 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2629 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2630 }
2631
2632 /* Unhandled events */
2633 if (irq_stat)
2634 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2635 __func__, irq_stat);
2636
2637 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2638}
2639
2640static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2641{
2642 struct dwc3_msm *mdwc = _mdwc;
2643 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2644
2645 dev_dbg(mdwc->dev, "%s\n", __func__);
2646
2647 if (atomic_read(&dwc->in_lpm))
2648 dwc3_resume_work(&mdwc->resume_work);
2649 else
2650 dwc3_pwr_event_handler(mdwc);
2651
Mayank Rana08e41922017-03-02 15:25:48 -08002652 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002653 return IRQ_HANDLED;
2654}
2655
2656static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2657{
2658 struct dwc3_msm *mdwc = data;
2659 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2660
2661 dwc->t_pwr_evt_irq = ktime_get();
2662 dev_dbg(mdwc->dev, "%s received\n", __func__);
2663 /*
2664 * When in Low Power Mode, we can't read PWR_EVNT_IRQ_STAT_REG to ascertain
2665 * which interrupts have been triggered, as the clocks are disabled.
2666 * Resume the controller by waking up the pwr event irq thread. After re-enabling
2667 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2668 * all other power events.
2669 */
2670 if (atomic_read(&dwc->in_lpm)) {
2671 /* set this to call dwc3_msm_resume() */
2672 mdwc->resume_pending = true;
2673 return IRQ_WAKE_THREAD;
2674 }
2675
2676 dwc3_pwr_event_handler(mdwc);
2677 return IRQ_HANDLED;
2678}
2679
2680static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2681 unsigned long action, void *hcpu)
2682{
2683 uint32_t cpu = (uintptr_t)hcpu;
2684 struct dwc3_msm *mdwc =
2685 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2686
2687 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2688 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2689 cpu_to_affin, mdwc->irq_to_affin);
2690 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2691 }
2692
2693 return NOTIFY_OK;
2694}
2695
2696static void dwc3_otg_sm_work(struct work_struct *w);
2697
2698static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
2699{
2700 int ret;
2701
2702 mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
2703 if (IS_ERR(mdwc->dwc3_gdsc))
2704 mdwc->dwc3_gdsc = NULL;
2705
2706 mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
2707 if (IS_ERR(mdwc->xo_clk)) {
2708 dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
2709 __func__);
2710 ret = PTR_ERR(mdwc->xo_clk);
2711 return ret;
2712 }
2713 clk_set_rate(mdwc->xo_clk, 19200000);
2714
2715 mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
2716 if (IS_ERR(mdwc->iface_clk)) {
2717 dev_err(mdwc->dev, "failed to get iface_clk\n");
2718 ret = PTR_ERR(mdwc->iface_clk);
2719 return ret;
2720 }
2721
2722 /*
2723 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
2724 * run at 125 MHz in SSUSB mode and above 60 MHz in HSUSB mode.
2725 * On newer platforms it can run at 150 MHz as well.
2726 */
2727 mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
2728 if (IS_ERR(mdwc->core_clk)) {
2729 dev_err(mdwc->dev, "failed to get core_clk\n");
2730 ret = PTR_ERR(mdwc->core_clk);
2731 return ret;
2732 }
2733
Amit Nischal4d278212016-06-06 17:54:34 +05302734 mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
2735 if (IS_ERR(mdwc->core_reset)) {
2736 dev_err(mdwc->dev, "failed to get core_reset\n");
2737 return PTR_ERR(mdwc->core_reset);
2738 }
2739
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302740 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302741 (u32 *)&mdwc->core_clk_rate)) {
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302742 dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
2743 return -EINVAL;
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302744 }
2745
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302746 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302747 mdwc->core_clk_rate);
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302748 dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
2749 mdwc->core_clk_rate);
2750 ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2751 if (ret)
2752 dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002753
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002754 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
2755 (u32 *)&mdwc->core_clk_rate_hs)) {
2756 dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
2757 mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
2758 }
2759
Mayank Rana511f3b22016-08-02 12:00:11 -07002760 mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
2761 if (IS_ERR(mdwc->sleep_clk)) {
2762 dev_err(mdwc->dev, "failed to get sleep_clk\n");
2763 ret = PTR_ERR(mdwc->sleep_clk);
2764 return ret;
2765 }
2766
2767 clk_set_rate(mdwc->sleep_clk, 32000);
2768 mdwc->utmi_clk_rate = 19200000;
2769 mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
2770 if (IS_ERR(mdwc->utmi_clk)) {
2771 dev_err(mdwc->dev, "failed to get utmi_clk\n");
2772 ret = PTR_ERR(mdwc->utmi_clk);
2773 return ret;
2774 }
2775
2776 clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
2777 mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
2778 if (IS_ERR(mdwc->bus_aggr_clk))
2779 mdwc->bus_aggr_clk = NULL;
2780
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302781 mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
2782 if (IS_ERR(mdwc->noc_aggr_clk))
2783 mdwc->noc_aggr_clk = NULL;
2784
Mayank Rana511f3b22016-08-02 12:00:11 -07002785 if (of_property_match_string(mdwc->dev->of_node,
2786 "clock-names", "cfg_ahb_clk") >= 0) {
2787 mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
2788 if (IS_ERR(mdwc->cfg_ahb_clk)) {
2789 ret = PTR_ERR(mdwc->cfg_ahb_clk);
2790 mdwc->cfg_ahb_clk = NULL;
2791 if (ret != -EPROBE_DEFER)
2792 dev_err(mdwc->dev,
2793 "failed to get cfg_ahb_clk ret %d\n",
2794 ret);
2795 return ret;
2796 }
2797 }
2798
2799 return 0;
2800}
2801
2802static int dwc3_msm_id_notifier(struct notifier_block *nb,
2803 unsigned long event, void *ptr)
2804{
2805 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002806 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002807 enum dwc3_id_state id;
Mayank Rana511f3b22016-08-02 12:00:11 -07002808
2809 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2810
2811 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2812
Mayank Rana511f3b22016-08-02 12:00:11 -07002813 if (mdwc->id_state != id) {
2814 mdwc->id_state = id;
Mayank Rana08e41922017-03-02 15:25:48 -08002815 dbg_event(0xFF, "id_state", mdwc->id_state);
Mayank Rana511f3b22016-08-02 12:00:11 -07002816 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2817 }
2818
Mayank Rana511f3b22016-08-02 12:00:11 -07002819 return NOTIFY_DONE;
2820}
2821
Hemant Kumar006fae42017-07-12 18:11:25 -07002822
2823static void check_for_sdp_connection(struct work_struct *w)
2824{
Hemant Kumar006fae42017-07-12 18:11:25 -07002825 struct dwc3_msm *mdwc =
2826 container_of(w, struct dwc3_msm, sdp_check.work);
2827 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2828
2829 if (!mdwc->vbus_active)
2830 return;
2831
2832 /* floating D+/D- lines detected */
2833 if (dwc->gadget.state < USB_STATE_DEFAULT &&
2834 dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
Hemant Kumar006fae42017-07-12 18:11:25 -07002835 mdwc->vbus_active = 0;
2836 dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
2837 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2838 }
2839}
2840
Mayank Rana511f3b22016-08-02 12:00:11 -07002841static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2842 unsigned long event, void *ptr)
2843{
2844 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2845 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002846
2847 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2848
2849 if (mdwc->vbus_active == event)
2850 return NOTIFY_DONE;
2851
Mayank Rana511f3b22016-08-02 12:00:11 -07002852 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002853 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002854 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002855
Mayank Rana511f3b22016-08-02 12:00:11 -07002856 return NOTIFY_DONE;
2857}
Jack Pham4e9dff72017-04-04 18:05:53 -07002858
Mayank Rana51958172017-02-28 14:49:21 -08002859/*
2860 * Handle EUD based soft detach/attach event, and force USB high speed mode
2861 * functionality on receiving soft attach event.
2862 *
2863 * @nb - notifier handler
2864 * @event - event information i.e. soft detach/attach event
2865 * @ptr - extcon_dev pointer
2866 *
2867 * @return int - NOTIFY_DONE always due to EUD
2868 */
2869static int dwc3_msm_eud_notifier(struct notifier_block *nb,
2870 unsigned long event, void *ptr)
2871{
2872 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, eud_event_nb);
2873 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana51958172017-02-28 14:49:21 -08002874
2875 dbg_event(0xFF, "EUD_NB", event);
2876 dev_dbg(mdwc->dev, "eud:%ld event received\n", event);
2877 if (mdwc->vbus_active == event)
2878 return NOTIFY_DONE;
2879
2880 /* Force USB High-Speed enumeration Only */
2881 dwc->maximum_speed = USB_SPEED_HIGH;
2882 dbg_event(0xFF, "Speed", dwc->maximum_speed);
2883 mdwc->vbus_active = event;
2884 if (dwc->is_drd && !mdwc->in_restart)
2885 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002886
Mayank Rana51958172017-02-28 14:49:21 -08002887 return NOTIFY_DONE;
2888}
Mayank Rana511f3b22016-08-02 12:00:11 -07002889
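/*
 * Register extcon notifiers based on the "extcon" DT phandles: the first
 * (mandatory) reports VBUS, the second (optional) reports ID, and the
 * third (optional) reports EUD attach/detach events.
 */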
2890static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
2891{
2892 struct device_node *node = mdwc->dev->of_node;
2893 struct extcon_dev *edev;
2894 int ret = 0;
2895
2896 if (!of_property_read_bool(node, "extcon"))
2897 return 0;
2898
Mayank Rana51958172017-02-28 14:49:21 -08002899 /* Use first phandle (mandatory) for USB vbus status notification */
Mayank Rana511f3b22016-08-02 12:00:11 -07002900 edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
2901 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
2902 return PTR_ERR(edev);
2903
2904 if (!IS_ERR(edev)) {
2905 mdwc->extcon_vbus = edev;
2906 mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
2907 ret = extcon_register_notifier(edev, EXTCON_USB,
2908 &mdwc->vbus_nb);
2909 if (ret < 0) {
2910 dev_err(mdwc->dev, "failed to register notifier for USB\n");
2911 return ret;
2912 }
2913 }
2914
Mayank Rana51958172017-02-28 14:49:21 -08002915 /* Use second phandle (optional) for USB ID status notification */
Mayank Rana511f3b22016-08-02 12:00:11 -07002916 if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
2917 edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
2918 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2919 ret = PTR_ERR(edev);
2920 goto err;
2921 }
2922 }
2923
2924 if (!IS_ERR(edev)) {
2925 mdwc->extcon_id = edev;
2926 mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
Mayank Rana54d60432017-07-18 12:10:04 -07002927 mdwc->host_restart_nb.notifier_call =
2928 dwc3_restart_usb_host_mode;
Mayank Rana511f3b22016-08-02 12:00:11 -07002929 ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
2930 &mdwc->id_nb);
2931 if (ret < 0) {
2932 dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
2933 goto err;
2934 }
Mayank Rana54d60432017-07-18 12:10:04 -07002935
2936 ret = extcon_register_blocking_notifier(edev, EXTCON_USB_HOST,
2937 &mdwc->host_restart_nb);
2938 if (ret < 0) {
2939 dev_err(mdwc->dev, "failed to register blocking notifier\n");
2940 goto err1;
2941 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002942 }
2943
Mayank Rana81bd2e52017-07-26 16:15:15 -07002944 edev = NULL;
Mayank Rana51958172017-02-28 14:49:21 -08002945 /* Use third phandle (optional) for EUD based detach/attach events */
2946 if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
2947 edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
2948 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2949 ret = PTR_ERR(edev);
Mayank Rana54d60432017-07-18 12:10:04 -07002950 goto err1;
Mayank Rana51958172017-02-28 14:49:21 -08002951 }
2952 }
2953
Mayank Rana81bd2e52017-07-26 16:15:15 -07002954 if (!IS_ERR_OR_NULL(edev)) {
Mayank Rana51958172017-02-28 14:49:21 -08002955 mdwc->extcon_eud = edev;
2956 mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
2957 ret = extcon_register_notifier(edev, EXTCON_USB,
2958 &mdwc->eud_event_nb);
2959 if (ret < 0) {
2960 dev_err(mdwc->dev, "failed to register notifier for EUD-USB\n");
Mayank Rana54d60432017-07-18 12:10:04 -07002961 goto err2;
Mayank Rana51958172017-02-28 14:49:21 -08002962 }
2963 }
2964
Mayank Rana511f3b22016-08-02 12:00:11 -07002965 return 0;
Mayank Rana54d60432017-07-18 12:10:04 -07002966err2:
2967 if (mdwc->extcon_id)
2968 extcon_unregister_blocking_notifier(mdwc->extcon_id,
2969 EXTCON_USB_HOST, &mdwc->host_restart_nb);
Mayank Rana51958172017-02-28 14:49:21 -08002970err1:
2971 if (mdwc->extcon_id)
2972 extcon_unregister_notifier(mdwc->extcon_id, EXTCON_USB_HOST,
2973 &mdwc->id_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07002974err:
2975 if (mdwc->extcon_vbus)
2976 extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
2977 &mdwc->vbus_nb);
2978 return ret;
2979}
2980
Jack Phambbe27962017-03-23 18:42:26 -07002981#define SMMU_BASE 0x10000000 /* Device address range base */
2982#define SMMU_SIZE 0x40000000 /* Device address range size */
2983
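/*
 * When an "iommus" DT property is present, create an ARM IOMMU mapping
 * covering [SMMU_BASE, SMMU_BASE + SMMU_SIZE) and attach the device to
 * it, optionally in S1 bypass mode.
 */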
2984static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
2985{
2986 struct device_node *node = mdwc->dev->of_node;
Jack Pham283cece2017-04-05 09:58:17 -07002987 int atomic_ctx = 1, s1_bypass;
Jack Phambbe27962017-03-23 18:42:26 -07002988 int ret;
2989
2990 if (!of_property_read_bool(node, "iommus"))
2991 return 0;
2992
2993 mdwc->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
2994 SMMU_BASE, SMMU_SIZE);
2995 if (IS_ERR_OR_NULL(mdwc->iommu_map)) {
2996 ret = PTR_ERR(mdwc->iommu_map) ?: -ENODEV;
2997 dev_err(mdwc->dev, "Failed to create IOMMU mapping (%d)\n",
2998 ret);
2999 return ret;
3000 }
3001 dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
3002
3003 ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
3004 &atomic_ctx);
3005 if (ret) {
3006 dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
3007 ret);
Jack Pham9faa51df2017-04-03 18:13:40 -07003008 goto release_mapping;
Jack Phambbe27962017-03-23 18:42:26 -07003009 }
3010
Jack Pham283cece2017-04-05 09:58:17 -07003011 s1_bypass = of_property_read_bool(node, "qcom,smmu-s1-bypass");
3012 ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
3013 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
3014 if (ret) {
3015 dev_err(mdwc->dev, "IOMMU set s1 bypass (%d) failed (%d)\n",
3016 s1_bypass, ret);
3017 goto release_mapping;
3018 }
3019
Jack Pham9faa51df2017-04-03 18:13:40 -07003020 ret = arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map);
3021 if (ret) {
3022 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n", ret);
3023 goto release_mapping;
3024 }
3025 dev_dbg(mdwc->dev, "attached to IOMMU\n");
3026
Jack Phambbe27962017-03-23 18:42:26 -07003027 return 0;
Jack Pham9faa51df2017-04-03 18:13:40 -07003028
3029release_mapping:
3030 arm_iommu_release_mapping(mdwc->iommu_map);
3031 mdwc->iommu_map = NULL;
3032 return ret;
Jack Phambbe27962017-03-23 18:42:26 -07003033}
3034
Mayank Rana511f3b22016-08-02 12:00:11 -07003035static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
3036 char *buf)
3037{
3038 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3039
3040 if (mdwc->vbus_active)
3041 return snprintf(buf, PAGE_SIZE, "peripheral\n");
3042 if (mdwc->id_state == DWC3_ID_GROUND)
3043 return snprintf(buf, PAGE_SIZE, "host\n");
3044
3045 return snprintf(buf, PAGE_SIZE, "none\n");
3046}
3047
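/*
 * Allow forcing the controller role from userspace: writing "peripheral"
 * or "host" simulates the corresponding cable state, anything else
 * reports both as removed.
 */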
3048static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
3049 const char *buf, size_t count)
3050{
3051 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3052
3053 if (sysfs_streq(buf, "peripheral")) {
3054 mdwc->vbus_active = true;
3055 mdwc->id_state = DWC3_ID_FLOAT;
3056 } else if (sysfs_streq(buf, "host")) {
3057 mdwc->vbus_active = false;
3058 mdwc->id_state = DWC3_ID_GROUND;
3059 } else {
3060 mdwc->vbus_active = false;
3061 mdwc->id_state = DWC3_ID_FLOAT;
3062 }
3063
3064 dwc3_ext_event_notify(mdwc);
3065
3066 return count;
3067}
3068
3069static DEVICE_ATTR_RW(mode);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303070static void msm_dwc3_perf_vote_work(struct work_struct *w);
Mayank Rana511f3b22016-08-02 12:00:11 -07003071
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003072static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
3073 char *buf)
3074{
3075 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3076 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3077
3078 return snprintf(buf, PAGE_SIZE, "%s\n",
3079 usb_speed_string(dwc->max_hw_supp_speed));
3080}
3081
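/*
 * Allow limiting the maximum supported speed from userspace: writing
 * "high" or "super" updates the limit and restarts the USB session so
 * the new speed takes effect.
 */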
3082static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
3083 const char *buf, size_t count)
3084{
3085 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3086 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3087 enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
3088
3089 if (sysfs_streq(buf, "high"))
3090 req_speed = USB_SPEED_HIGH;
3091 else if (sysfs_streq(buf, "super"))
3092 req_speed = USB_SPEED_SUPER;
3093
3094 if (req_speed != USB_SPEED_UNKNOWN &&
3095 req_speed != dwc->max_hw_supp_speed) {
3096 dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
3097 schedule_work(&mdwc->restart_usb_work);
3098 }
3099
3100 return count;
3101}
3102static DEVICE_ATTR_RW(speed);
3103
Mayank Rana511f3b22016-08-02 12:00:11 -07003104static int dwc3_msm_probe(struct platform_device *pdev)
3105{
3106 struct device_node *node = pdev->dev.of_node, *dwc3_node;
3107 struct device *dev = &pdev->dev;
Hemant Kumar8220a982017-01-19 18:11:34 -08003108 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003109 struct dwc3_msm *mdwc;
3110 struct dwc3 *dwc;
3111 struct resource *res;
3112 void __iomem *tcsr;
3113 bool host_mode;
Mayank Ranad339abe2017-05-31 09:19:49 -07003114 int ret = 0, i;
Mayank Rana511f3b22016-08-02 12:00:11 -07003115 int ext_hub_reset_gpio;
3116 u32 val;
Mayank Ranad339abe2017-05-31 09:19:49 -07003117 unsigned long irq_type;
Mayank Rana511f3b22016-08-02 12:00:11 -07003118
3119 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
3120 if (!mdwc)
3121 return -ENOMEM;
3122
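/* Prefer 64-bit DMA addressing and fall back to 32-bit if that fails */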
3123 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
3124 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
3125 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
3126 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
3127 return -EOPNOTSUPP;
3128 }
3129 }
3130
3131 platform_set_drvdata(pdev, mdwc);
3132 mdwc->dev = &pdev->dev;
3133
3134 INIT_LIST_HEAD(&mdwc->req_complete_list);
3135 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
3136 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07003137 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003138 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303139 INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
Hemant Kumar006fae42017-07-12 18:11:25 -07003140 INIT_DELAYED_WORK(&mdwc->sdp_check, check_for_sdp_connection);
Mayank Rana511f3b22016-08-02 12:00:11 -07003141
3142 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
3143 if (!mdwc->dwc3_wq) {
3144 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
3145 return -ENOMEM;
3146 }
3147
3148 /* Get all clks and gdsc reference */
3149 ret = dwc3_msm_get_clk_gdsc(mdwc);
3150 if (ret) {
3151 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
3152 return ret;
3153 }
3154
3155 mdwc->id_state = DWC3_ID_FLOAT;
3156 set_bit(ID, &mdwc->inputs);
3157
3158 mdwc->charging_disabled = of_property_read_bool(node,
3159 "qcom,charging-disabled");
3160
3161 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
3162 &mdwc->lpm_to_suspend_delay);
3163 if (ret) {
3164 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
3165 mdwc->lpm_to_suspend_delay = 0;
3166 }
3167
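	/*
	 * Request all named wakeup IRQs from the usb_irq_info template.
	 * Each is requested as a threaded IRQ and left disabled
	 * (IRQ_NOAUTOEN) until it is armed for wakeup; only pwr_event_irq
	 * is mandatory, and ss_phy_irq is a level-triggered interrupt.
	 */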
Mayank Ranad339abe2017-05-31 09:19:49 -07003168 memcpy(mdwc->wakeup_irq, usb_irq_info, sizeof(usb_irq_info));
3169 for (i = 0; i < USB_MAX_IRQ; i++) {
3170 irq_type = IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME |
3171 IRQF_ONESHOT;
3172 mdwc->wakeup_irq[i].irq = platform_get_irq_byname(pdev,
3173 mdwc->wakeup_irq[i].name);
3174 if (mdwc->wakeup_irq[i].irq < 0) {
3175			/* pwr_event_irq is the only mandatory IRQ */
3176 if (!strcmp(mdwc->wakeup_irq[i].name,
3177 "pwr_event_irq")) {
3178				dev_err(&pdev->dev, "get_irq for %s failed\n",
3179 mdwc->wakeup_irq[i].name);
3180 ret = -EINVAL;
3181 goto err;
3182 }
3183 mdwc->wakeup_irq[i].irq = 0;
3184 } else {
3185 irq_set_status_flags(mdwc->wakeup_irq[i].irq,
3186 IRQ_NOAUTOEN);
3187 /* ss_phy_irq is level trigger interrupt */
3188 if (!strcmp(mdwc->wakeup_irq[i].name, "ss_phy_irq"))
3189 irq_type = IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
3190 IRQ_TYPE_LEVEL_HIGH | IRQF_EARLY_RESUME;
Mayank Rana511f3b22016-08-02 12:00:11 -07003191
Mayank Ranad339abe2017-05-31 09:19:49 -07003192 ret = devm_request_threaded_irq(&pdev->dev,
3193 mdwc->wakeup_irq[i].irq,
Mayank Rana511f3b22016-08-02 12:00:11 -07003194 msm_dwc3_pwr_irq,
3195 msm_dwc3_pwr_irq_thread,
Mayank Ranad339abe2017-05-31 09:19:49 -07003196 irq_type,
3197 mdwc->wakeup_irq[i].name, mdwc);
3198 if (ret) {
3199				dev_err(&pdev->dev, "irq req %s failed: %d\n",
3200 mdwc->wakeup_irq[i].name, ret);
3201 goto err;
3202 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003203 }
3204 }
3205
3206 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
3207 if (!res) {
3208 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
3209 } else {
3210 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
3211 resource_size(res));
3212 if (IS_ERR_OR_NULL(tcsr)) {
3213 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
3214 } else {
3215 /* Enable USB3 on the primary USB port. */
3216 writel_relaxed(0x1, tcsr);
3217 /*
3218 * Ensure that TCSR write is completed before
3219 * USB registers initialization.
3220 */
3221 mb();
3222 }
3223 }
3224
3225 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
3226 if (!res) {
3227 dev_err(&pdev->dev, "missing memory base resource\n");
3228 ret = -ENODEV;
3229 goto err;
3230 }
3231
3232 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
3233 resource_size(res));
3234 if (!mdwc->base) {
3235 dev_err(&pdev->dev, "ioremap failed\n");
3236 ret = -ENODEV;
3237 goto err;
3238 }
3239
3240 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3241 "ahb2phy_base");
3242 if (res) {
3243 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
3244 res->start, resource_size(res));
3245 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
3246 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
3247 mdwc->ahb2phy_base = NULL;
3248 } else {
3249 /*
3250 * On some targets cfg_ahb_clk depends upon usb gdsc
3251 * regulator. If cfg_ahb_clk is enabled without
3252 * turning on usb gdsc regulator clk is stuck off.
3253 */
3254 dwc3_msm_config_gdsc(mdwc, 1);
3255 clk_prepare_enable(mdwc->cfg_ahb_clk);
3256			/* Configure AHB2PHY for one wait-state read/write */
3257 val = readl_relaxed(mdwc->ahb2phy_base +
3258 PERIPH_SS_AHB2PHY_TOP_CFG);
3259 if (val != ONE_READ_WRITE_WAIT) {
3260 writel_relaxed(ONE_READ_WRITE_WAIT,
3261 mdwc->ahb2phy_base +
3262 PERIPH_SS_AHB2PHY_TOP_CFG);
3263 /* complete above write before using USB PHY */
3264 mb();
3265 }
3266 clk_disable_unprepare(mdwc->cfg_ahb_clk);
3267 dwc3_msm_config_gdsc(mdwc, 0);
3268 }
3269 }
3270
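	/*
	 * Optional DBM block: resolve the "qcom,usb-dbm" phandle when
	 * present, deferring probe until the DBM device is available.
	 */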
3271 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
3272 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
3273 if (IS_ERR(mdwc->dbm)) {
3274 dev_err(&pdev->dev, "unable to get dbm device\n");
3275 ret = -EPROBE_DEFER;
3276 goto err;
3277 }
3278 /*
3279		 * The pwr_event_irq is required when the DBM signals exit
3280		 * from L1 low power mode by interrupt.
3281 */
3282 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
Mayank Ranad339abe2017-05-31 09:19:49 -07003283 if (!mdwc->wakeup_irq[PWR_EVNT_IRQ].irq) {
Mayank Rana511f3b22016-08-02 12:00:11 -07003284 dev_err(&pdev->dev,
3285 "need pwr_event_irq exiting L1\n");
3286 ret = -EINVAL;
3287 goto err;
3288 }
3289 }
3290 }
3291
3292 ext_hub_reset_gpio = of_get_named_gpio(node,
3293 "qcom,ext-hub-reset-gpio", 0);
3294
3295 if (gpio_is_valid(ext_hub_reset_gpio)
3296 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
3297 "qcom,ext-hub-reset-gpio"))) {
3298 /* reset external hub */
3299 gpio_direction_output(ext_hub_reset_gpio, 1);
3300 /*
3301		 * Hub reset should be asserted for a minimum of 5 microseconds
3302		 * before deasserting.
3303 */
3304 usleep_range(5, 1000);
3305 gpio_direction_output(ext_hub_reset_gpio, 0);
3306 }
3307
3308 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
3309 &mdwc->tx_fifo_size))
3310 dev_err(&pdev->dev,
3311 "unable to read platform data tx fifo size\n");
3312
3313 mdwc->disable_host_mode_pm = of_property_read_bool(node,
3314 "qcom,disable-host-mode-pm");
Mayank Ranad339abe2017-05-31 09:19:49 -07003315 mdwc->use_pdc_interrupts = of_property_read_bool(node,
3316 "qcom,use-pdc-interrupts");
Mayank Rana511f3b22016-08-02 12:00:11 -07003317 dwc3_set_notifier(&dwc3_msm_notify_event);
3318
Jack Phambbe27962017-03-23 18:42:26 -07003319 ret = dwc3_msm_init_iommu(mdwc);
3320 if (ret)
3321 goto err;
3322
Mayank Rana511f3b22016-08-02 12:00:11 -07003323 /* Assumes dwc3 is the first DT child of dwc3-msm */
3324 dwc3_node = of_get_next_available_child(node, NULL);
3325 if (!dwc3_node) {
3326 dev_err(&pdev->dev, "failed to find dwc3 child\n");
3327 ret = -ENODEV;
Jack Phambbe27962017-03-23 18:42:26 -07003328 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003329 }
3330
3331 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3332 if (ret) {
3333 dev_err(&pdev->dev,
3334			"failed to create dwc3 core\n");
3335 of_node_put(dwc3_node);
Jack Phambbe27962017-03-23 18:42:26 -07003336 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003337 }
3338
3339 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
3340 of_node_put(dwc3_node);
3341 if (!mdwc->dwc3) {
3342 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
		ret = -ENODEV;
3343		goto put_dwc3;
3344 }
3345
3346 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3347 "usb-phy", 0);
3348 if (IS_ERR(mdwc->hs_phy)) {
3349 dev_err(&pdev->dev, "unable to get hsphy device\n");
3350 ret = PTR_ERR(mdwc->hs_phy);
3351 goto put_dwc3;
3352 }
3353 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3354 "usb-phy", 1);
3355 if (IS_ERR(mdwc->ss_phy)) {
3356 dev_err(&pdev->dev, "unable to get ssphy device\n");
3357 ret = PTR_ERR(mdwc->ss_phy);
3358 goto put_dwc3;
3359 }
3360
3361 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3362 if (mdwc->bus_scale_table) {
3363 mdwc->bus_perf_client =
3364 msm_bus_scale_register_client(mdwc->bus_scale_table);
3365 }
3366
3367 dwc = platform_get_drvdata(mdwc->dwc3);
3368 if (!dwc) {
3369 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
		ret = -ENODEV;
3370		goto put_dwc3;
3371 }
3372
3373 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
3374 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
3375
3376 if (cpu_to_affin)
3377 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3378
Mayank Ranaf4918d32016-12-15 13:35:55 -08003379 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
3380 &mdwc->num_gsi_event_buffers);
3381
Jack Pham9faa51df2017-04-03 18:13:40 -07003382 /* IOMMU will be reattached upon each resume/connect */
3383 if (mdwc->iommu_map)
3384 arm_iommu_detach_device(mdwc->dev);
3385
Mayank Rana511f3b22016-08-02 12:00:11 -07003386 /*
3387 * Clocks and regulators will not be turned on until the first time
3388 * runtime PM resume is called. This is to allow for booting up with
3389 * charger already connected so as not to disturb PHY line states.
3390 */
3391 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
3392 atomic_set(&dwc->in_lpm, 1);
Mayank Rana511f3b22016-08-02 12:00:11 -07003393 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
3394 pm_runtime_use_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003395 device_init_wakeup(mdwc->dev, 1);
3396
3397 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
3398 pm_runtime_get_noresume(mdwc->dev);
3399
3400 ret = dwc3_msm_extcon_register(mdwc);
3401 if (ret)
3402 goto put_dwc3;
3403
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303404 ret = of_property_read_u32(node, "qcom,pm-qos-latency",
3405 &mdwc->pm_qos_latency);
3406 if (ret) {
3407 dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
3408 mdwc->pm_qos_latency = 0;
3409 }
3410
Hemant Kumar8220a982017-01-19 18:11:34 -08003411 mdwc->usb_psy = power_supply_get_by_name("usb");
3412 if (!mdwc->usb_psy) {
3413 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3414 pval.intval = -EINVAL;
3415 } else {
3416 power_supply_get_property(mdwc->usb_psy,
3417 POWER_SUPPLY_PROP_PRESENT, &pval);
3418 }
3419
Mayank Rana511f3b22016-08-02 12:00:11 -07003420 /* Update initial VBUS/ID state from extcon */
Jack Pham4e9dff72017-04-04 18:05:53 -07003421 if (mdwc->extcon_vbus && extcon_get_state(mdwc->extcon_vbus,
Mayank Rana511f3b22016-08-02 12:00:11 -07003422 EXTCON_USB))
3423 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
Jack Pham4e9dff72017-04-04 18:05:53 -07003424 else if (mdwc->extcon_id && extcon_get_state(mdwc->extcon_id,
Mayank Rana511f3b22016-08-02 12:00:11 -07003425 EXTCON_USB_HOST))
3426 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
Hemant Kumar8220a982017-01-19 18:11:34 -08003427 else if (!pval.intval) {
3428 /* USB cable is not connected */
3429 schedule_delayed_work(&mdwc->sm_work, 0);
3430 } else {
3431 if (pval.intval > 0)
3432 dev_info(mdwc->dev, "charger detection in progress\n");
3433 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003434
3435 device_create_file(&pdev->dev, &dev_attr_mode);
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003436 device_create_file(&pdev->dev, &dev_attr_speed);
Mayank Rana511f3b22016-08-02 12:00:11 -07003437
Mayank Rana511f3b22016-08-02 12:00:11 -07003438 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3439 if (!dwc->is_drd && host_mode) {
3440 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3441 mdwc->id_state = DWC3_ID_GROUND;
3442 dwc3_ext_event_notify(mdwc);
3443 }
3444
3445 return 0;
3446
3447put_dwc3:
3448 platform_device_put(mdwc->dwc3);
3449 if (mdwc->bus_perf_client)
3450 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
Jack Phambbe27962017-03-23 18:42:26 -07003451uninit_iommu:
Jack Pham9faa51df2017-04-03 18:13:40 -07003452 if (mdwc->iommu_map) {
3453 arm_iommu_detach_device(mdwc->dev);
Jack Phambbe27962017-03-23 18:42:26 -07003454 arm_iommu_release_mapping(mdwc->iommu_map);
Jack Pham9faa51df2017-04-03 18:13:40 -07003455 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003456err:
3457 return ret;
3458}
3459
3460static int dwc3_msm_remove_children(struct device *dev, void *data)
3461{
3462 device_unregister(dev);
3463 return 0;
3464}
3465
3466static int dwc3_msm_remove(struct platform_device *pdev)
3467{
3468 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
Mayank Rana08e41922017-03-02 15:25:48 -08003469 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003470 int ret_pm;
3471
3472	device_remove_file(&pdev->dev, &dev_attr_mode);
	device_remove_file(&pdev->dev, &dev_attr_speed);
3473
3474 if (cpu_to_affin)
3475 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3476
3477 /*
3478 * In case of system suspend, pm_runtime_get_sync fails.
3479 * Hence turn ON the clocks manually.
3480 */
3481 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003482 dbg_event(0xFF, "Remov gsyn", ret_pm);
Mayank Rana511f3b22016-08-02 12:00:11 -07003483 if (ret_pm < 0) {
3484 dev_err(mdwc->dev,
3485 "pm_runtime_get_sync failed with %d\n", ret_pm);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05303486 if (mdwc->noc_aggr_clk)
3487 clk_prepare_enable(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07003488 clk_prepare_enable(mdwc->utmi_clk);
3489 clk_prepare_enable(mdwc->core_clk);
3490 clk_prepare_enable(mdwc->iface_clk);
3491 clk_prepare_enable(mdwc->sleep_clk);
3492 if (mdwc->bus_aggr_clk)
3493 clk_prepare_enable(mdwc->bus_aggr_clk);
3494 clk_prepare_enable(mdwc->xo_clk);
3495 }
3496
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303497 cancel_delayed_work_sync(&mdwc->perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003498 cancel_delayed_work_sync(&mdwc->sm_work);
3499
3500 if (mdwc->hs_phy)
3501 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3502 platform_device_put(mdwc->dwc3);
3503 device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
3504
Mayank Rana08e41922017-03-02 15:25:48 -08003505 dbg_event(0xFF, "Remov put", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003506 pm_runtime_disable(mdwc->dev);
3507 pm_runtime_barrier(mdwc->dev);
3508 pm_runtime_put_sync(mdwc->dev);
3509 pm_runtime_set_suspended(mdwc->dev);
3510 device_wakeup_disable(mdwc->dev);
3511
3512 if (mdwc->bus_perf_client)
3513 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3514
3515 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
3516 regulator_disable(mdwc->vbus_reg);
3517
Mayank Ranad339abe2017-05-31 09:19:49 -07003518 if (mdwc->wakeup_irq[HS_PHY_IRQ].irq)
3519 disable_irq(mdwc->wakeup_irq[HS_PHY_IRQ].irq);
3520 if (mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq)
3521 disable_irq(mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq);
3522 if (mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq)
3523 disable_irq(mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq);
3524 if (mdwc->wakeup_irq[SS_PHY_IRQ].irq)
3525 disable_irq(mdwc->wakeup_irq[SS_PHY_IRQ].irq);
3526 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07003527
3528 clk_disable_unprepare(mdwc->utmi_clk);
3529 clk_set_rate(mdwc->core_clk, 19200000);
3530 clk_disable_unprepare(mdwc->core_clk);
3531 clk_disable_unprepare(mdwc->iface_clk);
3532 clk_disable_unprepare(mdwc->sleep_clk);
3533 clk_disable_unprepare(mdwc->xo_clk);
3534 clk_put(mdwc->xo_clk);
3535
3536 dwc3_msm_config_gdsc(mdwc, 0);
3537
Jack Phambbe27962017-03-23 18:42:26 -07003538 if (mdwc->iommu_map) {
3539 if (!atomic_read(&dwc->in_lpm))
3540 arm_iommu_detach_device(mdwc->dev);
3541 arm_iommu_release_mapping(mdwc->iommu_map);
3542 }
3543
Mayank Rana511f3b22016-08-02 12:00:11 -07003544 return 0;
3545}
3546
Jack Pham4d4e9342016-12-07 19:25:02 -08003547static int dwc3_msm_host_notifier(struct notifier_block *nb,
3548 unsigned long event, void *ptr)
3549{
3550 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
3551 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3552 struct usb_device *udev = ptr;
3553 union power_supply_propval pval;
3554 unsigned int max_power;
3555
3556 if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
3557 return NOTIFY_DONE;
3558
3559 if (!mdwc->usb_psy) {
3560 mdwc->usb_psy = power_supply_get_by_name("usb");
3561 if (!mdwc->usb_psy)
3562 return NOTIFY_DONE;
3563 }
3564
3565 /*
3566 * For direct-attach devices, new udev is direct child of root hub
3567 * i.e. dwc -> xhci -> root_hub -> udev
3568 * root_hub's udev->parent==NULL, so traverse struct device hierarchy
3569 */
3570 if (udev->parent && !udev->parent->parent &&
3571 udev->dev.parent->parent == &dwc->xhci->dev) {
3572 if (event == USB_DEVICE_ADD && udev->actconfig) {
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003573 if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
3574 /*
3575 * Core clock rate can be reduced only if root
3576 * hub SS port is not enabled/connected.
3577 */
3578 clk_set_rate(mdwc->core_clk,
3579 mdwc->core_clk_rate_hs);
3580 dev_dbg(mdwc->dev,
3581 "set hs core clk rate %ld\n",
3582 mdwc->core_clk_rate_hs);
3583 mdwc->max_rh_port_speed = USB_SPEED_HIGH;
3584 } else {
3585 mdwc->max_rh_port_speed = USB_SPEED_SUPER;
3586 }
3587
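			/*
			 * bMaxPower is expressed in units of 8 mA for
			 * SuperSpeed devices and 2 mA otherwise, per the
			 * USB specification.
			 */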
Jack Pham4d4e9342016-12-07 19:25:02 -08003588 if (udev->speed >= USB_SPEED_SUPER)
3589 max_power = udev->actconfig->desc.bMaxPower * 8;
3590 else
3591 max_power = udev->actconfig->desc.bMaxPower * 2;
3592 dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
3593 dev_name(&udev->dev), max_power);
3594
3595 /* inform PMIC of max power so it can optimize boost */
3596 pval.intval = max_power * 1000;
3597 power_supply_set_property(mdwc->usb_psy,
3598 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
3599 } else {
3600 pval.intval = 0;
3601 power_supply_set_property(mdwc->usb_psy,
3602 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
Hemant Kumar6f504dc2017-02-07 14:13:58 -08003603
3604 /* set rate back to default core clk rate */
3605 clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
3606 dev_dbg(mdwc->dev, "set core clk rate %ld\n",
3607 mdwc->core_clk_rate);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003608 mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
Jack Pham4d4e9342016-12-07 19:25:02 -08003609 }
3610 }
3611
3612 return NOTIFY_DONE;
3613}
3614
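/*
 * PM QoS voting: msm_dwc3_perf_vote_work() samples the controller
 * interrupt count every PM_QOS_SAMPLE_SEC seconds and, when the count
 * exceeds PM_QOS_THRESHOLD, votes the DT-provided pm-qos-latency via
 * msm_dwc3_perf_vote_update(); otherwise it falls back to
 * PM_QOS_DEFAULT_VALUE.
 */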
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303615static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
3616{
3617 static bool curr_perf_mode;
3618 int latency = mdwc->pm_qos_latency;
3619
3620 if ((curr_perf_mode == perf_mode) || !latency)
3621 return;
3622
3623 if (perf_mode)
3624 pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
3625 else
3626 pm_qos_update_request(&mdwc->pm_qos_req_dma,
3627 PM_QOS_DEFAULT_VALUE);
3628
3629 curr_perf_mode = perf_mode;
3630 pr_debug("%s: latency updated to: %d\n", __func__,
3631 perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
3632}
3633
3634static void msm_dwc3_perf_vote_work(struct work_struct *w)
3635{
3636 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
3637 perf_vote_work.work);
3638 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3639 static unsigned long last_irq_cnt;
3640 bool in_perf_mode = false;
3641
3642 if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD)
3643 in_perf_mode = true;
3644
3645 pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
3646 __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt));
3647
3648 last_irq_cnt = dwc->irq_cnt;
3649 msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
3650 schedule_delayed_work(&mdwc->perf_vote_work,
3651 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
3652}
3653
Mayank Rana511f3b22016-08-02 12:00:11 -07003654#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
3655
3656/**
3657 * dwc3_otg_start_host - helper function for starting/stopping the host
3658 * controller driver.
3659 *
3660 * @mdwc: Pointer to the dwc3_msm structure.
3661 * @on: start / stop the host controller driver.
3662 *
3663 * Returns 0 on success otherwise negative errno.
3664 */
3665static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
3666{
3667 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3668 int ret = 0;
3669
3670 if (!dwc->xhci)
3671 return -EINVAL;
3672
3673 /*
3674 * The vbus_reg pointer could have multiple values
3675 * NULL: regulator_get() hasn't been called, or was previously deferred
3676 * IS_ERR: regulator could not be obtained, so skip using it
3677 * Valid pointer otherwise
3678 */
3679 if (!mdwc->vbus_reg) {
3680 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
3681 "vbus_dwc3");
3682 if (IS_ERR(mdwc->vbus_reg) &&
3683 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
3684 /* regulators may not be ready, so retry again later */
3685 mdwc->vbus_reg = NULL;
3686 return -EPROBE_DEFER;
3687 }
3688 }
3689
3690 if (on) {
3691 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
3692
Mayank Rana511f3b22016-08-02 12:00:11 -07003693 mdwc->hs_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003694 if (dwc->maximum_speed == USB_SPEED_SUPER) {
Hemant Kumarde1df692016-04-26 19:36:48 -07003695 mdwc->ss_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003696 usb_phy_notify_connect(mdwc->ss_phy,
3697 USB_SPEED_SUPER);
3698 }
Hemant Kumarde1df692016-04-26 19:36:48 -07003699
Mayank Rana0d5efd72017-06-08 10:06:00 -07003700 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
Hemant Kumarde1df692016-04-26 19:36:48 -07003701 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003702 dbg_event(0xFF, "StrtHost gync",
3703 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003704 if (!IS_ERR(mdwc->vbus_reg))
3705 ret = regulator_enable(mdwc->vbus_reg);
3706 if (ret) {
3707 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
3708 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3709 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3710 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003711 dbg_event(0xFF, "vregerr psync",
3712 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003713 return ret;
3714 }
3715
3716 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
3717
Jack Pham4d4e9342016-12-07 19:25:02 -08003718 mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
3719 usb_register_notify(&mdwc->host_nb);
3720
Manu Gautam976fdfc2016-08-18 09:27:35 +05303721 mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
3722 usb_register_atomic_notify(&mdwc->usbdev_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003723 /*
3724 * FIXME If micro A cable is disconnected during system suspend,
3725 * xhci platform device will be removed before runtime pm is
3726 * enabled for xhci device. Due to this, disable_depth becomes
3727 * greater than one and runtimepm is not enabled for next microA
3728 * connect. Fix this by calling pm_runtime_init for xhci device.
3729 */
3730 pm_runtime_init(&dwc->xhci->dev);
3731 ret = platform_device_add(dwc->xhci);
3732 if (ret) {
3733 dev_err(mdwc->dev,
3734 "%s: failed to add XHCI pdev ret=%d\n",
3735 __func__, ret);
3736 if (!IS_ERR(mdwc->vbus_reg))
3737 regulator_disable(mdwc->vbus_reg);
3738 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3739 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3740 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003741 dbg_event(0xFF, "pdeverr psync",
3742 atomic_read(&mdwc->dev->power.usage_count));
Jack Pham4d4e9342016-12-07 19:25:02 -08003743 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003744 return ret;
3745 }
3746
3747 /*
3748 * In some cases it is observed that USB PHY is not going into
3749 * suspend with host mode suspend functionality. Hence disable
3750 * XHCI's runtime PM here if disable_host_mode_pm is set.
3751 */
3752 if (mdwc->disable_host_mode_pm)
3753 pm_runtime_disable(&dwc->xhci->dev);
3754
3755 mdwc->in_host_mode = true;
3756 dwc3_usb3_phy_suspend(dwc, true);
3757
3758 /* xHCI should have incremented child count as necessary */
Mayank Rana08e41922017-03-02 15:25:48 -08003759 dbg_event(0xFF, "StrtHost psync",
3760 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003761 pm_runtime_mark_last_busy(mdwc->dev);
3762 pm_runtime_put_sync_autosuspend(mdwc->dev);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303763#ifdef CONFIG_SMP
3764 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3765 mdwc->pm_qos_req_dma.irq = dwc->irq;
3766#endif
3767 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3768 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3769 /* start in perf mode for better performance initially */
3770 msm_dwc3_perf_vote_update(mdwc, true);
3771 schedule_delayed_work(&mdwc->perf_vote_work,
3772 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003773 } else {
3774 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3775
Manu Gautam976fdfc2016-08-18 09:27:35 +05303776 usb_unregister_atomic_notify(&mdwc->usbdev_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003777 if (!IS_ERR(mdwc->vbus_reg))
3778 ret = regulator_disable(mdwc->vbus_reg);
3779 if (ret) {
3780 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3781 return ret;
3782 }
3783
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303784 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3785 msm_dwc3_perf_vote_update(mdwc, false);
3786 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3787
Mayank Rana511f3b22016-08-02 12:00:11 -07003788 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003789 dbg_event(0xFF, "StopHost gsync",
3790 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003791 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
Mayank Rana0d5efd72017-06-08 10:06:00 -07003792 if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
3793 usb_phy_notify_disconnect(mdwc->ss_phy,
3794 USB_SPEED_SUPER);
3795 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3796 }
3797
Mayank Rana511f3b22016-08-02 12:00:11 -07003798 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
Mayank Rana511f3b22016-08-02 12:00:11 -07003799 platform_device_del(dwc->xhci);
Jack Pham4d4e9342016-12-07 19:25:02 -08003800 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003801
Mayank Rana511f3b22016-08-02 12:00:11 -07003802 dwc3_usb3_phy_suspend(dwc, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07003803 mdwc->in_host_mode = false;
3804
Mayank Rana511f3b22016-08-02 12:00:11 -07003805 pm_runtime_mark_last_busy(mdwc->dev);
3806 pm_runtime_put_sync_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003807 dbg_event(0xFF, "StopHost psync",
3808 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003809 }
3810
3811 return 0;
3812}
3813
3814static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3815{
3816 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3817
3818 /* Update OTG VBUS Valid from HSPHY to controller */
3819 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3820 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3821 UTMI_OTG_VBUS_VALID,
3822 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3823
3824 /* Update only if Super Speed is supported */
3825 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3826 /* Update VBUS Valid from SSPHY to controller */
3827 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3828 LANE0_PWR_PRESENT,
3829 vbus_present ? LANE0_PWR_PRESENT : 0);
3830 }
3831}
3832
3833/**
3834 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3835 *
3836 * @mdwc: Pointer to the dwc3_msm structure.
3837 * @on: Turn ON/OFF the gadget.
3838 *
3839 * Returns 0 on success otherwise negative errno.
3840 */
3841static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3842{
3843 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3844
3845 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003846 dbg_event(0xFF, "StrtGdgt gsync",
3847 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003848
3849 if (on) {
3850 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3851 __func__, dwc->gadget.name);
3852
3853 dwc3_override_vbus_status(mdwc, true);
3854 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3855 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3856
3857 /*
3858 * Core reset is not required during start peripheral. Only
3859 * DBM reset is required, hence perform only DBM reset here.
3860 */
3861 dwc3_msm_block_reset(mdwc, false);
3862
3863 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3864 usb_gadget_vbus_connect(&dwc->gadget);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303865#ifdef CONFIG_SMP
3866 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3867 mdwc->pm_qos_req_dma.irq = dwc->irq;
3868#endif
3869 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3870 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3871 /* start in perf mode for better performance initially */
3872 msm_dwc3_perf_vote_update(mdwc, true);
3873 schedule_delayed_work(&mdwc->perf_vote_work,
3874 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003875 } else {
3876 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3877 __func__, dwc->gadget.name);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303878 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3879 msm_dwc3_perf_vote_update(mdwc, false);
3880 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3881
Mayank Rana511f3b22016-08-02 12:00:11 -07003882 usb_gadget_vbus_disconnect(&dwc->gadget);
3883 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3884 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3885 dwc3_override_vbus_status(mdwc, false);
3886 dwc3_usb3_phy_suspend(dwc, false);
3887 }
3888
3889 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003890 dbg_event(0xFF, "StopGdgt psync",
3891 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003892
3893 return 0;
3894}
3895
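/*
 * Notifier used to switch the host-mode maximum speed at runtime:
 * stop host mode, force runtime suspend so the controller and PHYs
 * power down, apply the new maximum_speed, then kick the OTG state
 * machine to bring the host back up.
 */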
Mayank Rana54d60432017-07-18 12:10:04 -07003896/* speed: 0 - USB_SPEED_HIGH, 1 - USB_SPEED_SUPER */
3897static int dwc3_restart_usb_host_mode(struct notifier_block *nb,
3898 unsigned long event, void *ptr)
3899{
3900 struct dwc3_msm *mdwc;
3901 struct dwc3 *dwc;
3902 int ret = -EINVAL, usb_speed;
3903
3904 mdwc = container_of(nb, struct dwc3_msm, host_restart_nb);
3905 dwc = platform_get_drvdata(mdwc->dwc3);
3906
3907 usb_speed = (event == 0 ? USB_SPEED_HIGH : USB_SPEED_SUPER);
3908 if (dwc->maximum_speed == usb_speed)
3909 goto err;
3910
3911 dbg_event(0xFF, "stop_host_mode", dwc->maximum_speed);
3912 ret = dwc3_otg_start_host(mdwc, 0);
3913 if (ret)
3914 goto err;
3915
3916 /*
3917 * stop host mode functionality performs autosuspend with mdwc
3918	 * device, and it may take some time to call PM runtime
3919 * Hence call pm_runtime_suspend() API to invoke PM runtime
3920 * suspend immediately to put USB controller and PHYs into suspend.
3921 */
3922 ret = pm_runtime_suspend(mdwc->dev);
3923 dbg_event(0xFF, "pm_runtime_sus", ret);
3924
3925 dwc->maximum_speed = usb_speed;
3926 mdwc->otg_state = OTG_STATE_B_IDLE;
3927 schedule_delayed_work(&mdwc->sm_work, 0);
3928 dbg_event(0xFF, "complete_host_change", dwc->maximum_speed);
3929err:
3930 return ret;
3931}
3932
Hemant Kumar006fae42017-07-12 18:11:25 -07003933static int get_psy_type(struct dwc3_msm *mdwc)
Mayank Rana511f3b22016-08-02 12:00:11 -07003934{
Jack Pham8caff352016-08-19 16:33:55 -07003935 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003936
3937 if (mdwc->charging_disabled)
Hemant Kumar006fae42017-07-12 18:11:25 -07003938 return -EINVAL;
Mayank Rana511f3b22016-08-02 12:00:11 -07003939
3940 if (!mdwc->usb_psy) {
3941 mdwc->usb_psy = power_supply_get_by_name("usb");
3942 if (!mdwc->usb_psy) {
Hemant Kumar006fae42017-07-12 18:11:25 -07003943 dev_err(mdwc->dev, "Could not get usb psy\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07003944 return -ENODEV;
3945 }
3946 }
3947
Hemant Kumar006fae42017-07-12 18:11:25 -07003948 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_REAL_TYPE,
3949 &pval);
3950
3951 return pval.intval;
3952}
3953
3954static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3955{
3956 union power_supply_propval pval = {0};
3957 int ret, psy_type;
3958
3959 if (mdwc->max_power == mA)
3960 return 0;
3961
3962 psy_type = get_psy_type(mdwc);
Hemant Kumard6bae052017-07-27 15:11:25 -07003963 if (psy_type == POWER_SUPPLY_TYPE_USB) {
3964 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3965 /* Set max current limit in uA */
3966 pval.intval = 1000 * mA;
3967 } else if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
3968 pval.intval = -ETIMEDOUT;
3969 } else {
Jack Pham8caff352016-08-19 16:33:55 -07003970 return 0;
Hemant Kumard6bae052017-07-27 15:11:25 -07003971 }
Jack Pham8caff352016-08-19 16:33:55 -07003972
Jack Phamd72bafe2016-08-09 11:07:22 -07003973 ret = power_supply_set_property(mdwc->usb_psy,
3974 POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
3975 if (ret) {
3976 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3977 return ret;
3978 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003979
3980 mdwc->max_power = mA;
3981 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003982}
3983
3984
3985/**
3986 * dwc3_otg_sm_work - workqueue function.
3987 *
3988 * @w: Pointer to the dwc3 otg workqueue
3989 *
3990 * NOTE: After any change in otg_state, we must reschedule the state machine.
3991 */
3992static void dwc3_otg_sm_work(struct work_struct *w)
3993{
3994 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
3995 struct dwc3 *dwc = NULL;
3996 bool work = 0;
3997 int ret = 0;
3998 unsigned long delay = 0;
3999 const char *state;
4000
4001 if (mdwc->dwc3)
4002 dwc = platform_get_drvdata(mdwc->dwc3);
4003
4004 if (!dwc) {
4005 dev_err(mdwc->dev, "dwc is NULL.\n");
4006 return;
4007 }
4008
4009 state = usb_otg_state_string(mdwc->otg_state);
4010 dev_dbg(mdwc->dev, "%s state\n", state);
Mayank Rana08e41922017-03-02 15:25:48 -08004011 dbg_event(0xFF, state, 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004012
4013 /* Check OTG state */
4014 switch (mdwc->otg_state) {
4015 case OTG_STATE_UNDEFINED:
Hemant Kumar8220a982017-01-19 18:11:34 -08004016 /* put controller and phy in suspend if no cable connected */
Mayank Rana511f3b22016-08-02 12:00:11 -07004017 if (test_bit(ID, &mdwc->inputs) &&
Hemant Kumar8220a982017-01-19 18:11:34 -08004018 !test_bit(B_SESS_VLD, &mdwc->inputs)) {
4019 dbg_event(0xFF, "undef_id_!bsv", 0);
4020 pm_runtime_set_active(mdwc->dev);
4021 pm_runtime_enable(mdwc->dev);
4022 pm_runtime_get_noresume(mdwc->dev);
4023 dwc3_msm_resume(mdwc);
4024 pm_runtime_put_sync(mdwc->dev);
4025 dbg_event(0xFF, "Undef NoUSB",
4026 atomic_read(&mdwc->dev->power.usage_count));
4027 mdwc->otg_state = OTG_STATE_B_IDLE;
Mayank Rana511f3b22016-08-02 12:00:11 -07004028 break;
Hemant Kumar8220a982017-01-19 18:11:34 -08004029 }
Mayank Rana511f3b22016-08-02 12:00:11 -07004030
Mayank Rana08e41922017-03-02 15:25:48 -08004031 dbg_event(0xFF, "Exit UNDEF", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004032 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar8220a982017-01-19 18:11:34 -08004033 pm_runtime_set_suspended(mdwc->dev);
4034 pm_runtime_enable(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07004035 /* fall-through */
4036 case OTG_STATE_B_IDLE:
4037 if (!test_bit(ID, &mdwc->inputs)) {
4038 dev_dbg(mdwc->dev, "!id\n");
4039 mdwc->otg_state = OTG_STATE_A_IDLE;
4040 work = 1;
4041 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
4042 dev_dbg(mdwc->dev, "b_sess_vld\n");
Hemant Kumar006fae42017-07-12 18:11:25 -07004043 if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_FLOAT)
4044 queue_delayed_work(mdwc->dwc3_wq,
4045 &mdwc->sdp_check,
4046 msecs_to_jiffies(SDP_CONNETION_CHECK_TIME));
Mayank Rana511f3b22016-08-02 12:00:11 -07004047 /*
4048 * Increment pm usage count upon cable connect. Count
4049 * is decremented in OTG_STATE_B_PERIPHERAL state on
4050 * cable disconnect or in bus suspend.
4051 */
4052 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004053 dbg_event(0xFF, "BIDLE gsync",
4054 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004055 dwc3_otg_start_peripheral(mdwc, 1);
4056 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4057 work = 1;
4058 } else {
4059 dwc3_msm_gadget_vbus_draw(mdwc, 0);
4060 dev_dbg(mdwc->dev, "Cable disconnected\n");
4061 }
4062 break;
4063
4064 case OTG_STATE_B_PERIPHERAL:
4065 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
4066 !test_bit(ID, &mdwc->inputs)) {
4067 dev_dbg(mdwc->dev, "!id || !bsv\n");
4068 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar006fae42017-07-12 18:11:25 -07004069 cancel_delayed_work_sync(&mdwc->sdp_check);
Mayank Rana511f3b22016-08-02 12:00:11 -07004070 dwc3_otg_start_peripheral(mdwc, 0);
4071 /*
4072 * Decrement pm usage count upon cable disconnect
4073 * which was incremented upon cable connect in
4074 * OTG_STATE_B_IDLE state
4075 */
4076 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004077 dbg_event(0xFF, "!BSV psync",
4078 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004079 work = 1;
4080 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
4081 test_bit(B_SESS_VLD, &mdwc->inputs)) {
4082 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
4083 mdwc->otg_state = OTG_STATE_B_SUSPEND;
4084 /*
4085 * Decrement pm usage count upon bus suspend.
4086 * Count was incremented either upon cable
4087 * connect in OTG_STATE_B_IDLE or host
4088 * initiated resume after bus suspend in
4089 * OTG_STATE_B_SUSPEND state
4090 */
4091 pm_runtime_mark_last_busy(mdwc->dev);
4092 pm_runtime_put_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004093 dbg_event(0xFF, "SUSP put",
4094 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004095 }
4096 break;
4097
4098 case OTG_STATE_B_SUSPEND:
4099 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
4100 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
4101 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar006fae42017-07-12 18:11:25 -07004102 cancel_delayed_work_sync(&mdwc->sdp_check);
Mayank Rana511f3b22016-08-02 12:00:11 -07004103 dwc3_otg_start_peripheral(mdwc, 0);
4104 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
4105 dev_dbg(mdwc->dev, "BSUSP !susp\n");
4106 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4107 /*
4108 * Increment pm usage count upon host
4109 * initiated resume. Count was decremented
4110 * upon bus suspend in
4111 * OTG_STATE_B_PERIPHERAL state.
4112 */
4113 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004114 dbg_event(0xFF, "!SUSP gsync",
4115 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004116 }
4117 break;
4118
4119 case OTG_STATE_A_IDLE:
4120 /* Switch to A-Device*/
4121 if (test_bit(ID, &mdwc->inputs)) {
4122 dev_dbg(mdwc->dev, "id\n");
4123 mdwc->otg_state = OTG_STATE_B_IDLE;
4124 mdwc->vbus_retry_count = 0;
4125 work = 1;
4126 } else {
4127 mdwc->otg_state = OTG_STATE_A_HOST;
4128 ret = dwc3_otg_start_host(mdwc, 1);
4129 if ((ret == -EPROBE_DEFER) &&
4130 mdwc->vbus_retry_count < 3) {
4131 /*
4132 * Get regulator failed as regulator driver is
4133				 * not up yet. Will try to start host again after 1 second.
4134 */
4135 mdwc->otg_state = OTG_STATE_A_IDLE;
4136 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
4137 delay = VBUS_REG_CHECK_DELAY;
4138 work = 1;
4139 mdwc->vbus_retry_count++;
4140 } else if (ret) {
4141 dev_err(mdwc->dev, "unable to start host\n");
4142 mdwc->otg_state = OTG_STATE_A_IDLE;
4143 goto ret;
4144 }
4145 }
4146 break;
4147
4148 case OTG_STATE_A_HOST:
Manu Gautam976fdfc2016-08-18 09:27:35 +05304149 if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
4150 dev_dbg(mdwc->dev, "id || hc_died\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07004151 dwc3_otg_start_host(mdwc, 0);
4152 mdwc->otg_state = OTG_STATE_B_IDLE;
4153 mdwc->vbus_retry_count = 0;
Manu Gautam976fdfc2016-08-18 09:27:35 +05304154 mdwc->hc_died = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07004155 work = 1;
4156 } else {
4157 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004158 dbg_event(0xFF, "XHCIResume", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004159 if (dwc)
4160 pm_runtime_resume(&dwc->xhci->dev);
4161 }
4162 break;
4163
4164 default:
4165 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
4166
4167 }
4168
4169 if (work)
4170 schedule_delayed_work(&mdwc->sm_work, delay);
4171
4172ret:
4173 return;
4174}
4175
4176#ifdef CONFIG_PM_SLEEP
4177static int dwc3_msm_pm_suspend(struct device *dev)
4178{
4179 int ret = 0;
4180 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4181 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4182
4183 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004184 dbg_event(0xFF, "PM Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004185
4186 flush_workqueue(mdwc->dwc3_wq);
4187 if (!atomic_read(&dwc->in_lpm)) {
4188 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
4189 return -EBUSY;
4190 }
4191
4192 ret = dwc3_msm_suspend(mdwc);
4193 if (!ret)
4194 atomic_set(&mdwc->pm_suspended, 1);
4195
4196 return ret;
4197}
4198
4199static int dwc3_msm_pm_resume(struct device *dev)
4200{
4201 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004202 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004203
4204 dev_dbg(dev, "dwc3-msm PM resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004205 dbg_event(0xFF, "PM Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004206
Mayank Rana511f3b22016-08-02 12:00:11 -07004207 /* flush to avoid race in read/write of pm_suspended */
4208 flush_workqueue(mdwc->dwc3_wq);
4209 atomic_set(&mdwc->pm_suspended, 0);
4210
4211 /* kick in otg state machine */
4212 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
4213
4214 return 0;
4215}
4216#endif
4217
4218#ifdef CONFIG_PM
4219static int dwc3_msm_runtime_idle(struct device *dev)
4220{
Mayank Rana08e41922017-03-02 15:25:48 -08004221 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4222 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4223
Mayank Rana511f3b22016-08-02 12:00:11 -07004224 dev_dbg(dev, "DWC3-msm runtime idle\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004225 dbg_event(0xFF, "RT Idle", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004226
4227 return 0;
4228}
4229
4230static int dwc3_msm_runtime_suspend(struct device *dev)
4231{
4232 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004233 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004234
4235 dev_dbg(dev, "DWC3-msm runtime suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004236 dbg_event(0xFF, "RT Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004237
4238 return dwc3_msm_suspend(mdwc);
4239}
4240
4241static int dwc3_msm_runtime_resume(struct device *dev)
4242{
4243 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004244 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004245
4246 dev_dbg(dev, "DWC3-msm runtime resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004247 dbg_event(0xFF, "RT Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004248
4249 return dwc3_msm_resume(mdwc);
4250}
4251#endif
4252
4253static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
4254 SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
4255 SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
4256 dwc3_msm_runtime_idle)
4257};
4258
4259static const struct of_device_id of_dwc3_match[] = {
4260 {
4261 .compatible = "qcom,dwc-usb3-msm",
4262 },
4263 { },
4264};
4265MODULE_DEVICE_TABLE(of, of_dwc3_match);
4266
4267static struct platform_driver dwc3_msm_driver = {
4268 .probe = dwc3_msm_probe,
4269 .remove = dwc3_msm_remove,
4270 .driver = {
4271 .name = "msm-dwc3",
4272 .pm = &dwc3_msm_dev_pm_ops,
4273		.of_match_table = of_dwc3_match,
4274 },
4275};
4276
4277MODULE_LICENSE("GPL v2");
4278MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
4279
4280static int dwc3_msm_init(void)
4281{
4282 return platform_driver_register(&dwc3_msm_driver);
4283}
4284module_init(dwc3_msm_init);
4285
4286static void __exit dwc3_msm_exit(void)
4287{
4288 platform_driver_unregister(&dwc3_msm_driver);
4289}
4290module_exit(dwc3_msm_exit);