/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/interrupt.h>
#include <asm/dma-iommu.h>
#include <linux/iommu.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/of.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_wakeup.h>
#include <linux/power_supply.h>
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/msm-bus.h>
#include <linux/irq.h>
#include <linux/extcon.h>
#include <linux/reset.h>
#include <linux/clk/qcom.h>

#include "power.h"
#include "core.h"
#include "gadget.h"
#include "dbm.h"
#include "debug.h"
#include "xhci.h"

#define SDP_CONNETION_CHECK_TIME 10000 /* in ms */

/* timeout to wait for USB cable status notification (in ms) */
#define SM_INIT_TIMEOUT 30000

/* AHB2PHY register offsets */
#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10

/* AHB2PHY read/write wait value */
#define ONE_READ_WRITE_WAIT 0x11

/* cpu to which the usb interrupt is affined */
static int cpu_to_affin;
module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cpu_to_affin, "affine usb irq to this cpu");

/* override for USB speed */
static int override_usb_speed;
module_param(override_usb_speed, int, 0644);
MODULE_PARM_DESC(override_usb_speed, "override for USB speed");

/* XHCI registers */
#define USB3_HCSPARAMS1		(0x4)
#define USB3_PORTSC		(0x420)

/* USB QSCRATCH Hardware registers */
#define QSCRATCH_REG_OFFSET	(0x000F8800)
#define QSCRATCH_GENERAL_CFG	(QSCRATCH_REG_OFFSET + 0x08)
#define CGCTL_REG		(QSCRATCH_REG_OFFSET + 0x28)
#define PWR_EVNT_IRQ_STAT_REG	(QSCRATCH_REG_OFFSET + 0x58)
#define PWR_EVNT_IRQ_MASK_REG	(QSCRATCH_REG_OFFSET + 0x5C)

#define PWR_EVNT_POWERDOWN_IN_P3_MASK		BIT(2)
#define PWR_EVNT_POWERDOWN_OUT_P3_MASK		BIT(3)
#define PWR_EVNT_LPM_IN_L2_MASK			BIT(4)
#define PWR_EVNT_LPM_OUT_L2_MASK		BIT(5)
#define PWR_EVNT_LPM_OUT_L1_MASK		BIT(13)

/* QSCRATCH_GENERAL_CFG register bit offset */
#define PIPE_UTMI_CLK_SEL	BIT(0)
#define PIPE3_PHYSTATUS_SW	BIT(3)
#define PIPE_UTMI_CLK_DIS	BIT(8)

#define HS_PHY_CTRL_REG		(QSCRATCH_REG_OFFSET + 0x10)
#define UTMI_OTG_VBUS_VALID	BIT(20)
#define SW_SESSVLD_SEL		BIT(28)

#define SS_PHY_CTRL_REG		(QSCRATCH_REG_OFFSET + 0x30)
#define LANE0_PWR_PRESENT	BIT(24)

/* GSI related registers */
#define GSI_TRB_ADDR_BIT_53_MASK	(1 << 21)
#define GSI_TRB_ADDR_BIT_55_MASK	(1 << 23)

#define GSI_GENERAL_CFG_REG		(QSCRATCH_REG_OFFSET + 0xFC)
#define GSI_RESTART_DBL_PNTR_MASK	BIT(20)
#define GSI_CLK_EN_MASK			BIT(12)
#define BLOCK_GSI_WR_GO_MASK		BIT(1)
#define GSI_EN_MASK			BIT(0)

#define GSI_DBL_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
#define GSI_DBL_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
#define GSI_RING_BASE_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
#define GSI_RING_BASE_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
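
/*
 * Note: the 'n' argument to the GSI doorbell and ring-base macros above is
 * the zero-based GSI interrupter index (ep_intr_num - 1), as used by
 * gsi_store_ringbase_dbl_info() below.
 */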

#define GSI_IF_STS		(QSCRATCH_REG_OFFSET + 0x1A4)
#define GSI_WR_CTRL_STATE_MASK	BIT(15)

#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK		(1 << 31)
#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n)	(n << 22)
#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n)	(n << 16)
#define DWC3_GEVENT_TYPE_GSI			0x3

struct dwc3_msm_req_complete {
	struct list_head list_item;
	struct usb_request *req;
	void (*orig_complete)(struct usb_ep *ep,
			      struct usb_request *req);
};

enum dwc3_id_state {
	DWC3_ID_GROUND = 0,
	DWC3_ID_FLOAT,
};

/* for type c cable */
enum plug_orientation {
	ORIENTATION_NONE,
	ORIENTATION_CC1,
	ORIENTATION_CC2,
};

enum msm_usb_irq {
	HS_PHY_IRQ,
	PWR_EVNT_IRQ,
	DP_HS_PHY_IRQ,
	DM_HS_PHY_IRQ,
	SS_PHY_IRQ,
	USB_MAX_IRQ
};

struct usb_irq {
	char *name;
	int irq;
	bool enable;
};

static const struct usb_irq usb_irq_info[USB_MAX_IRQ] = {
	{"hs_phy_irq", 0},
	{"pwr_event_irq", 0},
	{"dp_hs_phy_irq", 0},
	{"dm_hs_phy_irq", 0},
	{"ss_phy_irq", 0},
};
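
/*
 * usb_irq_info[] is indexed by enum msm_usb_irq, so the order of entries
 * must match the enum above; the names are used to look up the platform
 * IRQ resources for mdwc->wakeup_irq[] (presumably resolved at probe time).
 */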

/* Input bits to state machine (mdwc->inputs) */

#define ID			0
#define B_SESS_VLD		1
#define B_SUSPEND		2

#define PM_QOS_SAMPLE_SEC	2
#define PM_QOS_THRESHOLD	400

struct dwc3_msm {
	struct device *dev;
	void __iomem *base;
	void __iomem *ahb2phy_base;
	struct platform_device *dwc3;
	struct dma_iommu_mapping *iommu_map;
	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
	struct list_head req_complete_list;
	struct clk		*xo_clk;
	struct clk		*core_clk;
	long			core_clk_rate;
	long			core_clk_rate_hs;
	struct clk		*iface_clk;
	struct clk		*sleep_clk;
	struct clk		*utmi_clk;
	unsigned int		utmi_clk_rate;
	struct clk		*utmi_clk_src;
	struct clk		*bus_aggr_clk;
	struct clk		*noc_aggr_clk;
	struct clk		*cfg_ahb_clk;
	struct reset_control	*core_reset;
	struct regulator	*dwc3_gdsc;

	struct usb_phy		*hs_phy, *ss_phy;

	struct dbm		*dbm;

	/* VBUS regulator for host mode */
	struct regulator	*vbus_reg;
	int			vbus_retry_count;
	bool			resume_pending;
	atomic_t		pm_suspended;
	struct usb_irq		wakeup_irq[USB_MAX_IRQ];
	struct work_struct	resume_work;
	struct work_struct	restart_usb_work;
	bool			in_restart;
	struct workqueue_struct *dwc3_wq;
	struct delayed_work	sm_work;
	unsigned long		inputs;
	unsigned int		max_power;
	bool			charging_disabled;
	enum usb_otg_state	otg_state;
	u32			bus_perf_client;
	struct msm_bus_scale_pdata	*bus_scale_table;
	struct power_supply	*usb_psy;
	struct work_struct	vbus_draw_work;
	bool			in_host_mode;
	enum usb_device_speed	max_rh_port_speed;
	unsigned int		tx_fifo_size;
	bool			vbus_active;
	bool			suspend;
	bool			disable_host_mode_pm;
	bool			use_pdc_interrupts;
	enum dwc3_id_state	id_state;
	unsigned long		lpm_flags;
#define MDWC3_SS_PHY_SUSPEND		BIT(0)
#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY	BIT(1)
#define MDWC3_POWER_COLLAPSE		BIT(2)

	unsigned int		irq_to_affin;
	struct notifier_block	dwc3_cpu_notifier;
	struct notifier_block	usbdev_nb;
	bool			hc_died;

	struct extcon_dev	*extcon_vbus;
	struct extcon_dev	*extcon_id;
	struct extcon_dev	*extcon_eud;
	struct notifier_block	vbus_nb;
	struct notifier_block	id_nb;
	struct notifier_block	eud_event_nb;
	struct notifier_block	host_restart_nb;

	struct notifier_block	host_nb;

	atomic_t		in_p3;
	unsigned int		lpm_to_suspend_delay;
	bool			init;
	enum plug_orientation	typec_orientation;
	u32			num_gsi_event_buffers;
	struct dwc3_event_buffer **gsi_ev_buff;
	int			pm_qos_latency;
	struct pm_qos_request	pm_qos_req_dma;
	struct delayed_work	perf_vote_work;
	struct delayed_work	sdp_check;
};

#define USB_HSPHY_3P3_VOL_MIN		3050000 /* uV */
#define USB_HSPHY_3P3_VOL_MAX		3300000 /* uV */
#define USB_HSPHY_3P3_HPM_LOAD		16000	/* uA */

#define USB_HSPHY_1P8_VOL_MIN		1800000 /* uV */
#define USB_HSPHY_1P8_VOL_MAX		1800000 /* uV */
#define USB_HSPHY_1P8_HPM_LOAD		19000	/* uA */

#define USB_SSPHY_1P8_VOL_MIN		1800000 /* uV */
#define USB_SSPHY_1P8_VOL_MAX		1800000 /* uV */
#define USB_SSPHY_1P8_HPM_LOAD		23000	/* uA */

#define DSTS_CONNECTSPD_SS		0x4

static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
static int dwc3_restart_usb_host_mode(struct notifier_block *nb,
				unsigned long event, void *ptr);

static inline bool is_valid_usb_speed(struct dwc3 *dwc, int speed)
{
	return (((speed == USB_SPEED_FULL) || (speed == USB_SPEED_HIGH) ||
		(speed == USB_SPEED_SUPER) || (speed == USB_SPEED_SUPER_PLUS))
		&& (speed <= dwc->maximum_speed));
}

Mayank Rana511f3b22016-08-02 12:00:11 -0700300/**
301 *
302 * Read register with debug info.
303 *
304 * @base - DWC3 base virtual address.
305 * @offset - register offset.
306 *
307 * @return u32
308 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700309static inline u32 dwc3_msm_read_reg(void __iomem *base, u32 offset)
Mayank Rana511f3b22016-08-02 12:00:11 -0700310{
311 u32 val = ioread32(base + offset);
312 return val;
313}
314
315/**
316 * Read register masked field with debug info.
317 *
318 * @base - DWC3 base virtual address.
319 * @offset - register offset.
320 * @mask - register bitmask.
321 *
322 * @return u32
323 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700324static inline u32 dwc3_msm_read_reg_field(void __iomem *base,
Mayank Rana511f3b22016-08-02 12:00:11 -0700325 u32 offset,
326 const u32 mask)
327{
Mayank Ranad796cab2017-07-11 15:34:12 -0700328 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700329 u32 val = ioread32(base + offset);
330
331 val &= mask; /* clear other bits */
332 val >>= shift;
333 return val;
334}
335
336/**
337 *
338 * Write register with debug info.
339 *
340 * @base - DWC3 base virtual address.
341 * @offset - register offset.
342 * @val - value to write.
343 *
344 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700345static inline void dwc3_msm_write_reg(void __iomem *base, u32 offset, u32 val)
Mayank Rana511f3b22016-08-02 12:00:11 -0700346{
347 iowrite32(val, base + offset);
348}
349
350/**
351 * Write register masked field with debug info.
352 *
353 * @base - DWC3 base virtual address.
354 * @offset - register offset.
355 * @mask - register bitmask.
356 * @val - value to write.
357 *
358 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700359static inline void dwc3_msm_write_reg_field(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700360 const u32 mask, u32 val)
361{
Mayank Ranad796cab2017-07-11 15:34:12 -0700362 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700363 u32 tmp = ioread32(base + offset);
364
365 tmp &= ~mask; /* clear written bits */
366 val = tmp | (val << shift);
367 iowrite32(val, base + offset);
368}
369
370/**
371 * Write register and read back masked value to confirm it is written
372 *
373 * @base - DWC3 base virtual address.
374 * @offset - register offset.
375 * @mask - register bitmask specifying what should be updated
376 * @val - value to write.
377 *
378 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700379static inline void dwc3_msm_write_readback(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700380 const u32 mask, u32 val)
381{
382 u32 write_val, tmp = ioread32(base + offset);
383
384 tmp &= ~mask; /* retain other bits */
385 write_val = tmp | val;
386
387 iowrite32(write_val, base + offset);
388
389 /* Read back to see if val was written */
390 tmp = ioread32(base + offset);
391 tmp &= mask; /* clear other bits */
392
393 if (tmp != val)
394 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
395 __func__, val, offset);
396}
397
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800398static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
399{
400 int i, num_ports;
401 u32 reg;
402
403 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
404 num_ports = HCS_MAX_PORTS(reg);
405
406 for (i = 0; i < num_ports; i++) {
407 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
408 if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
409 return true;
410 }
411
412 return false;
413}
414
Mayank Rana511f3b22016-08-02 12:00:11 -0700415static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
416{
417 int i, num_ports;
418 u32 reg;
419
420 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
421 num_ports = HCS_MAX_PORTS(reg);
422
423 for (i = 0; i < num_ports; i++) {
424 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
425 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
426 return true;
427 }
428
429 return false;
430}
431
432static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
433{
434 u8 speed;
435
436 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
437 return !!(speed & DSTS_CONNECTSPD_SS);
438}
439
440static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
441{
442 if (mdwc->in_host_mode)
443 return dwc3_msm_is_host_superspeed(mdwc);
444
445 return dwc3_msm_is_dev_superspeed(mdwc);
446}
447
448#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
449/**
450 * Configure the DBM with the BAM's data fifo.
451 * This function is called by the USB BAM Driver
452 * upon initialization.
453 *
454 * @ep - pointer to usb endpoint.
455 * @addr - address of data fifo.
456 * @size - size of data fifo.
457 *
458 */
459int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
460 u32 size, u8 dst_pipe_idx)
461{
462 struct dwc3_ep *dep = to_dwc3_ep(ep);
463 struct dwc3 *dwc = dep->dwc;
464 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
465
466 dev_dbg(mdwc->dev, "%s\n", __func__);
467
468 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
469 dst_pipe_idx);
470}
471
472
/**
* Clean up the MSM endpoint state when a request completes.
*
* Also calls the original request completion handler.
*
* @usb_ep - pointer to usb_ep instance.
* @request - pointer to usb_request instance.
*/
483static void dwc3_msm_req_complete_func(struct usb_ep *ep,
484 struct usb_request *request)
485{
486 struct dwc3_ep *dep = to_dwc3_ep(ep);
487 struct dwc3 *dwc = dep->dwc;
488 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
489 struct dwc3_msm_req_complete *req_complete = NULL;
490
491 /* Find original request complete function and remove it from list */
492 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
493 if (req_complete->req == request)
494 break;
495 }
496 if (!req_complete || req_complete->req != request) {
497 dev_err(dep->dwc->dev, "%s: could not find the request\n",
498 __func__);
499 return;
500 }
501 list_del(&req_complete->list_item);
502
	/*
	 * Release one more TRB to the pool, since the DBM queue took two TRBs
	 * (normal and link) and dwc3_gadget_giveback() in dwc3/gadget.c
	 * released only one.
	 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700508 dep->trb_dequeue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700509
510 /* Unconfigure dbm ep */
511 dbm_ep_unconfig(mdwc->dbm, dep->number);
512
	/*
	 * If this is the last endpoint we unconfigured, also reset the
	 * event buffers, unless the ep is being unconfigured due to LPM,
	 * in which case the event buffer only gets reset during the
	 * block reset.
	 */
519 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
520 !dbm_reset_ep_after_lpm(mdwc->dbm))
521 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
522
523 /*
524 * Call original complete function, notice that dwc->lock is already
525 * taken by the caller of this function (dwc3_gadget_giveback()).
526 */
527 request->complete = req_complete->orig_complete;
528 if (request->complete)
529 request->complete(ep, request);
530
531 kfree(req_complete);
532}
533
534
535/**
536* Helper function
537*
538* Reset DBM endpoint.
539*
540* @mdwc - pointer to dwc3_msm instance.
541* @dep - pointer to dwc3_ep instance.
542*
543* @return int - 0 on success, negative on error.
544*/
545static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
546{
547 int ret;
548
549 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
550
551 /* Reset the dbm endpoint */
552 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
553 if (ret) {
554 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
555 __func__);
556 return ret;
557 }
558
559 /*
560 * The necessary delay between asserting and deasserting the dbm ep
561 * reset is based on the number of active endpoints. If there is more
562 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
563 * delay will suffice.
564 */
565 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
566 usleep_range(1000, 1200);
567 else
568 udelay(10);
569 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
570 if (ret) {
571 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
572 __func__);
573 return ret;
574 }
575
576 return 0;
577}
578
579/**
580* Reset the DBM endpoint which is linked to the given USB endpoint.
581*
582* @usb_ep - pointer to usb_ep instance.
583*
584* @return int - 0 on success, negative on error.
585*/
586
587int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
588{
589 struct dwc3_ep *dep = to_dwc3_ep(ep);
590 struct dwc3 *dwc = dep->dwc;
591 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
592
593 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
594}
595EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
596
597
598/**
599* Helper function.
600* See the header of the dwc3_msm_ep_queue function.
601*
602* @dwc3_ep - pointer to dwc3_ep instance.
603* @req - pointer to dwc3_request instance.
604*
605* @return int - 0 on success, negative on error.
606*/
607static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
608{
609 struct dwc3_trb *trb;
610 struct dwc3_trb *trb_link;
611 struct dwc3_gadget_ep_cmd_params params;
612 u32 cmd;
613 int ret = 0;
614
	/* We push the request to dep->started_list to indicate that this
	 * request has been issued with a Start Transfer command. The request
	 * leaves this list in two cases: either the transfer completes (which
	 * never happens for an endless transfer built on a circular TRB ring
	 * with a link TRB), or a Stop Transfer is issued, which the function
	 * driver can initiate by calling dequeue.
	 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700622 req->started = true;
623 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700624
625 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana9ca186c2017-06-19 17:57:21 -0700626 trb = &dep->trb_pool[dep->trb_enqueue];
627 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700628 memset(trb, 0, sizeof(*trb));
629
630 req->trb = trb;
631 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
632 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
633 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
634 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
635 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
636
637 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana9ca186c2017-06-19 17:57:21 -0700638 trb_link = &dep->trb_pool[dep->trb_enqueue];
639 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700640 memset(trb_link, 0, sizeof(*trb_link));
641
642 trb_link->bpl = lower_32_bits(req->trb_dma);
643 trb_link->bph = DBM_TRB_BIT |
644 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
645 trb_link->size = 0;
646 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
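
	/*
	 * The resulting ring is a single normal TRB (chained, pointing at a
	 * placeholder buffer managed by the DBM) followed by a link TRB that
	 * points back to it, so the hardware can circulate the pair endlessly.
	 */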
647
648 /*
649 * Now start the transfer
650 */
651 memset(&params, 0, sizeof(params));
652 params.param0 = 0; /* TDAddr High */
653 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
654
655 /* DBM requires IOC to be set */
656 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700657 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700658 if (ret < 0) {
659 dev_dbg(dep->dwc->dev,
660 "%s: failed to send STARTTRANSFER command\n",
661 __func__);
662
663 list_del(&req->list);
664 return ret;
665 }
666 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700667 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700668
669 return ret;
670}
671
/**
* Queue a usb request to the DBM endpoint.
* This function should be called only after the endpoint
* has been enabled via ep_enable.
*
* It prepares a special TRB structure understood by the DBM
* hardware, so that the endpoint can be used in DBM mode.
*
* The TRBs prepared by this function are one normal TRB,
* which points to a fake buffer, followed by a link TRB
* that points back to the first TRB.
*
* The API of this function follows the regular API of
* usb_ep_queue (see usb_ep_ops in include/linux/usb/gadget.h).
*
* @usb_ep - pointer to usb_ep instance.
* @request - pointer to usb_request instance.
* @gfp_flags - possible flags.
*
* @return int - 0 on success, negative on error.
*/
694static int dwc3_msm_ep_queue(struct usb_ep *ep,
695 struct usb_request *request, gfp_t gfp_flags)
696{
697 struct dwc3_request *req = to_dwc3_request(request);
698 struct dwc3_ep *dep = to_dwc3_ep(ep);
699 struct dwc3 *dwc = dep->dwc;
700 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
701 struct dwc3_msm_req_complete *req_complete;
702 unsigned long flags;
703 int ret = 0, size;
704 u8 bam_pipe;
705 bool producer;
706 bool disable_wb;
707 bool internal_mem;
708 bool ioc;
709 bool superspeed;
710
711 if (!(request->udc_priv & MSM_SPS_MODE)) {
712 /* Not SPS mode, call original queue */
713 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
714 __func__);
715
716 return (mdwc->original_ep_ops[dep->number])->queue(ep,
717 request,
718 gfp_flags);
719 }
720
721 /* HW restriction regarding TRB size (8KB) */
722 if (req->request.length < 0x2000) {
723 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
724 return -EINVAL;
725 }
726
	/*
	 * Override req->complete function, but before doing that,
	 * store its original pointer in the req_complete_list.
	 */
731 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
732 if (!req_complete)
733 return -ENOMEM;
734
735 req_complete->req = request;
736 req_complete->orig_complete = request->complete;
737 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
738 request->complete = dwc3_msm_req_complete_func;
739
740 /*
741 * Configure the DBM endpoint
742 */
743 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
744 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
745 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
746 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
747 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
748
749 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
750 disable_wb, internal_mem, ioc);
751 if (ret < 0) {
752 dev_err(mdwc->dev,
753 "error %d after calling dbm_ep_config\n", ret);
754 return ret;
755 }
756
	dev_vdbg(dwc->dev, "%s: queueing request %p to ep %s length %d\n",
			__func__, request, ep->name, request->length);
759 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
760 dbm_event_buffer_config(mdwc->dbm,
761 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
762 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
763 DWC3_GEVNTSIZ_SIZE(size));
764
	/*
	 * We must take the dwc3 core driver's lock (with interrupts
	 * disabled) to be sure we are the only ones configuring the
	 * hardware core, and we queue the request as quickly as
	 * possible so the lock can be released again soon.
	 */
772 spin_lock_irqsave(&dwc->lock, flags);
773 if (!dep->endpoint.desc) {
774 dev_err(mdwc->dev,
775 "%s: trying to queue request %p to disabled ep %s\n",
776 __func__, request, ep->name);
777 ret = -EPERM;
778 goto err;
779 }
780
781 if (dep->number == 0 || dep->number == 1) {
782 dev_err(mdwc->dev,
783 "%s: trying to queue dbm request %p to control ep %s\n",
784 __func__, request, ep->name);
785 ret = -EPERM;
786 goto err;
787 }
788
789
Mayank Rana83ad5822016-08-09 14:17:22 -0700790 if (dep->trb_dequeue != dep->trb_enqueue ||
791 !list_empty(&dep->pending_list)
792 || !list_empty(&dep->started_list)) {
		dev_err(mdwc->dev,
			"%s: trying to queue dbm request %p to ep %s\n",
			__func__, request, ep->name);
796 ret = -EPERM;
797 goto err;
798 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700799 dep->trb_dequeue = 0;
800 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700801 }
802
803 ret = __dwc3_msm_ep_queue(dep, req);
804 if (ret < 0) {
805 dev_err(mdwc->dev,
806 "error %d after calling __dwc3_msm_ep_queue\n", ret);
807 goto err;
808 }
809
810 spin_unlock_irqrestore(&dwc->lock, flags);
811 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
812 dbm_set_speed(mdwc->dbm, (u8)superspeed);
813
814 return 0;
815
816err:
817 spin_unlock_irqrestore(&dwc->lock, flags);
818 kfree(req_complete);
819 return ret;
820}
821
822/*
823* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
824*
825* @usb_ep - pointer to usb_ep instance.
826*
827* @return int - XferRscIndex
828*/
829static inline int gsi_get_xfer_index(struct usb_ep *ep)
830{
831 struct dwc3_ep *dep = to_dwc3_ep(ep);
832
833 return dep->resource_index;
834}
835
836/*
837* Fills up the GSI channel information needed in call to IPA driver
838* for GSI channel creation.
839*
840* @usb_ep - pointer to usb_ep instance.
841* @ch_info - output parameter with requested channel info
842*/
843static void gsi_get_channel_info(struct usb_ep *ep,
844 struct gsi_channel_info *ch_info)
845{
846 struct dwc3_ep *dep = to_dwc3_ep(ep);
847 int last_trb_index = 0;
848 struct dwc3 *dwc = dep->dwc;
849 struct usb_gsi_request *request = ch_info->ch_req;
850
851 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
852 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
Mayank Ranaac776d12017-04-18 16:56:13 -0700853 DWC3_DEP_BASE(dep->number) + DWC3_DEPCMD);
854
Mayank Rana511f3b22016-08-02 12:00:11 -0700855 ch_info->depcmd_hi_addr = 0;
856
857 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
858 &dep->trb_pool[0]);
	/* Convert to multiples of 1KB */
860 ch_info->const_buffer_size = request->buf_len/1024;
861
862 /* IN direction */
863 if (dep->direction) {
864 /*
865 * Multiply by size of each TRB for xfer_ring_len in bytes.
866 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
867 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
868 */
869 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
870 last_trb_index = 2 * request->num_bufs + 2;
871 } else { /* OUT direction */
872 /*
873 * Multiply by size of each TRB for xfer_ring_len in bytes.
874 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
875 * LINK TRB.
876 */
Mayank Rana64d136b2016-11-01 21:01:34 -0700877 ch_info->xfer_ring_len = (request->num_bufs + 2) * 0x10;
878 last_trb_index = request->num_bufs + 2;
Mayank Rana511f3b22016-08-02 12:00:11 -0700879 }
880
881 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
882 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
883 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
884 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
885 DWC3_GEVNTCOUNT(ep->ep_intr_num));
886 ch_info->gevntcount_hi_addr = 0;
887
888 dev_dbg(dwc->dev,
889 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
890 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
891 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
892}
893
894/*
895* Perform StartXfer on GSI EP. Stores XferRscIndex.
896*
897* @usb_ep - pointer to usb_ep instance.
898*
899* @return int - 0 on success
900*/
901static int gsi_startxfer_for_ep(struct usb_ep *ep)
902{
903 int ret;
904 struct dwc3_gadget_ep_cmd_params params;
905 u32 cmd;
906 struct dwc3_ep *dep = to_dwc3_ep(ep);
907 struct dwc3 *dwc = dep->dwc;
908
909 memset(&params, 0, sizeof(params));
910 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
911 params.param0 |= (ep->ep_intr_num << 16);
912 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
913 &dep->trb_pool[0]));
914 cmd = DWC3_DEPCMD_STARTTRANSFER;
915 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700916 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700917
918 if (ret < 0)
919 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700920 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700921 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
922 return ret;
923}
924
925/*
926* Store Ring Base and Doorbell Address for GSI EP
927* for GSI channel creation.
928*
929* @usb_ep - pointer to usb_ep instance.
930* @dbl_addr - Doorbell address obtained from IPA driver
931*/
932static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
933{
934 struct dwc3_ep *dep = to_dwc3_ep(ep);
935 struct dwc3 *dwc = dep->dwc;
936 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
937 int n = ep->ep_intr_num - 1;
938
939 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
940 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
941 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
942
943 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
944 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
945 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
946 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
947}
948
949/*
Mayank Rana64d136b2016-11-01 21:01:34 -0700950* Rings Doorbell for GSI Channel
Mayank Rana511f3b22016-08-02 12:00:11 -0700951*
952* @usb_ep - pointer to usb_ep instance.
953* @request - pointer to GSI request. This is used to pass in the
954* address of the GSI doorbell obtained from IPA driver
955*/
Mayank Rana64d136b2016-11-01 21:01:34 -0700956static void gsi_ring_db(struct usb_ep *ep, struct usb_gsi_request *request)
Mayank Rana511f3b22016-08-02 12:00:11 -0700957{
958 void __iomem *gsi_dbl_address_lsb;
959 void __iomem *gsi_dbl_address_msb;
960 dma_addr_t offset;
961 u64 dbl_addr = *((u64 *)request->buf_base_addr);
962 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
963 u32 dbl_hi_addr = (dbl_addr >> 32);
Mayank Rana511f3b22016-08-02 12:00:11 -0700964 struct dwc3_ep *dep = to_dwc3_ep(ep);
965 struct dwc3 *dwc = dep->dwc;
966 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Rana64d136b2016-11-01 21:01:34 -0700967 int num_trbs = (dep->direction) ? (2 * (request->num_bufs) + 2)
968 : (request->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -0700969
970 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
971 dbl_lo_addr, sizeof(u32));
972 if (!gsi_dbl_address_lsb)
973 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
974
975 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
976 dbl_hi_addr, sizeof(u32));
977 if (!gsi_dbl_address_msb)
978 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
979
980 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
Mayank Rana64d136b2016-11-01 21:01:34 -0700981 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x) for ep:%s\n",
982 &offset, gsi_dbl_address_lsb, dbl_lo_addr, ep->name);
Mayank Rana511f3b22016-08-02 12:00:11 -0700983
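	/*
	 * Ring the GSI doorbell by writing the DMA address of the last (link)
	 * TRB to the doorbell register provided by the IPA driver; this tells
	 * the GSI hardware where the end of the prepared TRB ring is.
	 */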
984 writel_relaxed(offset, gsi_dbl_address_lsb);
985 writel_relaxed(0, gsi_dbl_address_msb);
986}
987
988/*
989* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
990*
991* @usb_ep - pointer to usb_ep instance.
992* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
993*
994* @return int - 0 on success
995*/
996static int gsi_updatexfer_for_ep(struct usb_ep *ep,
997 struct usb_gsi_request *request)
998{
999 int i;
1000 int ret;
1001 u32 cmd;
1002 int num_trbs = request->num_bufs + 1;
1003 struct dwc3_trb *trb;
1004 struct dwc3_gadget_ep_cmd_params params;
1005 struct dwc3_ep *dep = to_dwc3_ep(ep);
1006 struct dwc3 *dwc = dep->dwc;
1007
1008 for (i = 0; i < num_trbs - 1; i++) {
1009 trb = &dep->trb_pool[i];
1010 trb->ctrl |= DWC3_TRB_CTRL_HWO;
1011 }
1012
1013 memset(&params, 0, sizeof(params));
1014 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1015 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -07001016 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001017 dep->flags |= DWC3_EP_BUSY;
1018 if (ret < 0)
1019 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
1020 return ret;
1021}
1022
1023/*
1024* Perform EndXfer on particular GSI EP.
1025*
1026* @usb_ep - pointer to usb_ep instance.
1027*/
1028static void gsi_endxfer_for_ep(struct usb_ep *ep)
1029{
1030 struct dwc3_ep *dep = to_dwc3_ep(ep);
1031 struct dwc3 *dwc = dep->dwc;
1032
1033 dwc3_stop_active_transfer(dwc, dep->number, true);
1034}
1035
1036/*
1037* Allocates and configures TRBs for GSI EPs.
1038*
1039* @usb_ep - pointer to usb_ep instance.
1040* @request - pointer to GSI request.
1041*
1042* @return int - 0 on success
1043*/
1044static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
1045{
1046 int i = 0;
1047 dma_addr_t buffer_addr = req->dma;
1048 struct dwc3_ep *dep = to_dwc3_ep(ep);
1049 struct dwc3 *dwc = dep->dwc;
1050 struct dwc3_trb *trb;
1051 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
Mayank Rana64d136b2016-11-01 21:01:34 -07001052 : (req->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -07001053
Jack Phambbe27962017-03-23 18:42:26 -07001054 dep->trb_dma_pool = dma_pool_create(ep->name, dwc->sysdev,
Mayank Rana511f3b22016-08-02 12:00:11 -07001055 num_trbs * sizeof(struct dwc3_trb),
1056 num_trbs * sizeof(struct dwc3_trb), 0);
1057 if (!dep->trb_dma_pool) {
1058 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
1059 dep->name);
1060 return -ENOMEM;
1061 }
1062
1063 dep->num_trbs = num_trbs;
1064
1065 dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
1066 GFP_KERNEL, &dep->trb_pool_dma);
1067 if (!dep->trb_pool) {
1068 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
1069 dep->name);
1070 return -ENOMEM;
1071 }
1072
1073 /* IN direction */
1074 if (dep->direction) {
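		/*
		 * IN ring layout (2n + 2 TRBs): the first n + 1 TRBs are
		 * zero-length placeholders, followed by n TRBs pointing at
		 * the request buffers, and a final link TRB back to TRB 0.
		 */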
1075 for (i = 0; i < num_trbs ; i++) {
1076 trb = &dep->trb_pool[i];
1077 memset(trb, 0, sizeof(*trb));
1078 /* Set up first n+1 TRBs for ZLPs */
1079 if (i < (req->num_bufs + 1)) {
1080 trb->bpl = 0;
1081 trb->bph = 0;
1082 trb->size = 0;
1083 trb->ctrl = DWC3_TRBCTL_NORMAL
1084 | DWC3_TRB_CTRL_IOC;
1085 continue;
1086 }
1087
1088 /* Setup n TRBs pointing to valid buffers */
1089 trb->bpl = lower_32_bits(buffer_addr);
1090 trb->bph = 0;
1091 trb->size = 0;
1092 trb->ctrl = DWC3_TRBCTL_NORMAL
1093 | DWC3_TRB_CTRL_IOC;
1094 buffer_addr += req->buf_len;
1095
1096 /* Set up the Link TRB at the end */
1097 if (i == (num_trbs - 1)) {
1098 trb->bpl = dwc3_trb_dma_offset(dep,
1099 &dep->trb_pool[0]);
1100 trb->bph = (1 << 23) | (1 << 21)
1101 | (ep->ep_intr_num << 16);
1102 trb->size = 0;
1103 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1104 | DWC3_TRB_CTRL_HWO;
1105 }
1106 }
1107 } else { /* OUT direction */
1108
1109 for (i = 0; i < num_trbs ; i++) {
1110
1111 trb = &dep->trb_pool[i];
1112 memset(trb, 0, sizeof(*trb));
Mayank Rana64d136b2016-11-01 21:01:34 -07001113 /* Setup LINK TRB to start with TRB ring */
1114 if (i == 0) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001115 trb->bpl = dwc3_trb_dma_offset(dep,
Mayank Rana64d136b2016-11-01 21:01:34 -07001116 &dep->trb_pool[1]);
1117 trb->ctrl = DWC3_TRBCTL_LINK_TRB;
1118 } else if (i == (num_trbs - 1)) {
1119 /* Set up the Link TRB at the end */
1120 trb->bpl = dwc3_trb_dma_offset(dep,
1121 &dep->trb_pool[0]);
Mayank Rana511f3b22016-08-02 12:00:11 -07001122 trb->bph = (1 << 23) | (1 << 21)
1123 | (ep->ep_intr_num << 16);
Mayank Rana511f3b22016-08-02 12:00:11 -07001124 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1125 | DWC3_TRB_CTRL_HWO;
Mayank Rana64d136b2016-11-01 21:01:34 -07001126 } else {
1127 trb->bpl = lower_32_bits(buffer_addr);
1128 trb->size = req->buf_len;
1129 buffer_addr += req->buf_len;
1130 trb->ctrl = DWC3_TRBCTL_NORMAL
1131 | DWC3_TRB_CTRL_IOC
1132 | DWC3_TRB_CTRL_CSP
1133 | DWC3_TRB_CTRL_ISP_IMI;
Mayank Rana511f3b22016-08-02 12:00:11 -07001134 }
1135 }
1136 }
Mayank Rana64d136b2016-11-01 21:01:34 -07001137
1138 pr_debug("%s: Initialized TRB Ring for %s\n", __func__, dep->name);
1139 trb = &dep->trb_pool[0];
1140 if (trb) {
1141 for (i = 0; i < num_trbs; i++) {
1142 pr_debug("TRB(%d): ADDRESS:%lx bpl:%x bph:%x size:%x ctrl:%x\n",
1143 i, (unsigned long)dwc3_trb_dma_offset(dep,
1144 &dep->trb_pool[i]), trb->bpl, trb->bph,
1145 trb->size, trb->ctrl);
1146 trb++;
1147 }
1148 }
1149
Mayank Rana511f3b22016-08-02 12:00:11 -07001150 return 0;
1151}
1152
1153/*
1154* Frees TRBs for GSI EPs.
1155*
1156* @usb_ep - pointer to usb_ep instance.
1157*
1158*/
1159static void gsi_free_trbs(struct usb_ep *ep)
1160{
1161 struct dwc3_ep *dep = to_dwc3_ep(ep);
1162
1163 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1164 return;
1165
1166 /* Free TRBs and TRB pool for EP */
1167 if (dep->trb_dma_pool) {
1168 dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
1169 dep->trb_pool_dma);
1170 dma_pool_destroy(dep->trb_dma_pool);
1171 dep->trb_pool = NULL;
1172 dep->trb_pool_dma = 0;
1173 dep->trb_dma_pool = NULL;
1174 }
1175}
1176/*
1177* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1178*
1179* @usb_ep - pointer to usb_ep instance.
1180* @request - pointer to GSI request.
1181*/
1182static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
1183{
1184 struct dwc3_ep *dep = to_dwc3_ep(ep);
1185 struct dwc3 *dwc = dep->dwc;
1186 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1187 struct dwc3_gadget_ep_cmd_params params;
1188 const struct usb_endpoint_descriptor *desc = ep->desc;
1189 const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
Mayank Ranaac1200c2017-04-25 13:48:46 -07001190 u32 reg;
1191 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001192
1193 memset(&params, 0x00, sizeof(params));
1194
1195 /* Configure GSI EP */
1196 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
1197 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
1198
1199 /* Burst size is only needed in SuperSpeed mode */
1200 if (dwc->gadget.speed == USB_SPEED_SUPER) {
1201 u32 burst = dep->endpoint.maxburst - 1;
1202
1203 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
1204 }
1205
1206 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
1207 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
1208 | DWC3_DEPCFG_STREAM_EVENT_EN;
1209 dep->stream_capable = true;
1210 }
1211
1212 /* Set EP number */
1213 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
1214
1215 /* Set interrupter number for GSI endpoints */
1216 params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
1217
1218 /* Enable XferInProgress and XferComplete Interrupts */
1219 params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
1220 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
1221 params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
1222 /*
1223 * We must use the lower 16 TX FIFOs even though
1224 * HW might have more
1225 */
1226 /* Remove FIFO Number for GSI EP*/
1227 if (dep->direction)
1228 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
1229
1230 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
1231
1232 dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
1233 params.param0, params.param1, params.param2, dep->name);
1234
Mayank Rana83ad5822016-08-09 14:17:22 -07001235 dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001236
1237 /* Set XferRsc Index for GSI EP */
1238 if (!(dep->flags & DWC3_EP_ENABLED)) {
Mayank Ranaac1200c2017-04-25 13:48:46 -07001239 ret = dwc3_gadget_resize_tx_fifos(dwc, dep);
1240 if (ret)
1241 return;
1242
Mayank Rana511f3b22016-08-02 12:00:11 -07001243 memset(&params, 0x00, sizeof(params));
1244 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
Mayank Rana83ad5822016-08-09 14:17:22 -07001245 dwc3_send_gadget_ep_cmd(dep,
Mayank Rana511f3b22016-08-02 12:00:11 -07001246 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
1247
1248 dep->endpoint.desc = desc;
1249 dep->comp_desc = comp_desc;
1250 dep->type = usb_endpoint_type(desc);
1251 dep->flags |= DWC3_EP_ENABLED;
1252 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1253 reg |= DWC3_DALEPENA_EP(dep->number);
1254 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1255 }
1256
1257}
1258
1259/*
1260* Enables USB wrapper for GSI
1261*
1262* @usb_ep - pointer to usb_ep instance.
1263*/
1264static void gsi_enable(struct usb_ep *ep)
1265{
1266 struct dwc3_ep *dep = to_dwc3_ep(ep);
1267 struct dwc3 *dwc = dep->dwc;
1268 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1269
1270 dwc3_msm_write_reg_field(mdwc->base,
1271 GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
1272 dwc3_msm_write_reg_field(mdwc->base,
1273 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
1274 dwc3_msm_write_reg_field(mdwc->base,
1275 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
1276 dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
1277 dwc3_msm_write_reg_field(mdwc->base,
1278 GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
1279}
1280
1281/*
1282* Block or allow doorbell towards GSI
1283*
1284* @usb_ep - pointer to usb_ep instance.
1285* @request - pointer to GSI request. In this case num_bufs is used as a bool
1286* to set or clear the doorbell bit
1287*/
1288static void gsi_set_clear_dbell(struct usb_ep *ep,
1289 bool block_db)
1290{
1291
1292 struct dwc3_ep *dep = to_dwc3_ep(ep);
1293 struct dwc3 *dwc = dep->dwc;
1294 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1295
1296 dwc3_msm_write_reg_field(mdwc->base,
1297 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1298}
1299
1300/*
1301* Performs necessary checks before stopping GSI channels
1302*
1303* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1304*/
1305static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
1306{
1307 u32 timeout = 1500;
1308 u32 reg = 0;
1309 struct dwc3_ep *dep = to_dwc3_ep(ep);
1310 struct dwc3 *dwc = dep->dwc;
1311 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1312
1313 while (dwc3_msm_read_reg_field(mdwc->base,
1314 GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
1315 if (!timeout--) {
1316 dev_err(mdwc->dev,
1317 "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
1318 return false;
1319 }
1320 }
1321 /* Check for U3 only if we are not handling Function Suspend */
1322 if (!f_suspend) {
1323 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1324 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
1325 dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
1326 return false;
1327 }
1328 }
1329
1330 return true;
1331}
1332
1333
1334/**
1335* Performs GSI operations or GSI EP related operations.
1336*
1337* @usb_ep - pointer to usb_ep instance.
1338* @op_data - pointer to opcode related data.
1339* @op - GSI related or GSI EP related op code.
1340*
1341* @return int - 0 on success, negative on error.
1342* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1343*/
1344static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1345 void *op_data, enum gsi_ep_op op)
1346{
1347 u32 ret = 0;
1348 struct dwc3_ep *dep = to_dwc3_ep(ep);
1349 struct dwc3 *dwc = dep->dwc;
1350 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1351 struct usb_gsi_request *request;
1352 struct gsi_channel_info *ch_info;
1353 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001354 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001355
1356 switch (op) {
1357 case GSI_EP_OP_PREPARE_TRBS:
1358 request = (struct usb_gsi_request *)op_data;
1359 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1360 ret = gsi_prepare_trbs(ep, request);
1361 break;
1362 case GSI_EP_OP_FREE_TRBS:
1363 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1364 gsi_free_trbs(ep);
1365 break;
1366 case GSI_EP_OP_CONFIG:
1367 request = (struct usb_gsi_request *)op_data;
1368 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001369 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001370 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001371 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001372 break;
1373 case GSI_EP_OP_STARTXFER:
1374 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001375 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001376 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001377 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001378 break;
1379 case GSI_EP_OP_GET_XFER_IDX:
1380 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1381 ret = gsi_get_xfer_index(ep);
1382 break;
1383 case GSI_EP_OP_STORE_DBL_INFO:
1384 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1385 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1386 break;
1387 case GSI_EP_OP_ENABLE_GSI:
1388 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1389 gsi_enable(ep);
1390 break;
1391 case GSI_EP_OP_GET_CH_INFO:
1392 ch_info = (struct gsi_channel_info *)op_data;
1393 gsi_get_channel_info(ep, ch_info);
1394 break;
Mayank Rana64d136b2016-11-01 21:01:34 -07001395 case GSI_EP_OP_RING_DB:
Mayank Rana511f3b22016-08-02 12:00:11 -07001396 request = (struct usb_gsi_request *)op_data;
Mayank Rana64d136b2016-11-01 21:01:34 -07001397 dbg_print(0xFF, "RING_DB", 0, ep->name);
1398 gsi_ring_db(ep, request);
Mayank Rana511f3b22016-08-02 12:00:11 -07001399 break;
1400 case GSI_EP_OP_UPDATEXFER:
1401 request = (struct usb_gsi_request *)op_data;
1402 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001403 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001404 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001405 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001406 break;
1407 case GSI_EP_OP_ENDXFER:
1408 request = (struct usb_gsi_request *)op_data;
1409 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001410 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001411 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001412 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001413 break;
1414 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1415 block_db = *((bool *)op_data);
1416 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1417 block_db);
1418 gsi_set_clear_dbell(ep, block_db);
1419 break;
1420 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1421 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1422 f_suspend = *((bool *)op_data);
1423 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1424 break;
1425 case GSI_EP_OP_DISABLE:
1426 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1427 ret = ep->ops->disable(ep);
1428 break;
1429 default:
1430 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1431 }
1432
1433 return ret;
1434}
1435
/**
 * Configure MSM endpoint.
 * This function applies configurations to an endpoint that
 * require an MSM-specific implementation.
 *
 * It should be called by a USB function/class layer that needs
 * support from the MSM-specific hardware wrapping the USB3 core
 * (such as GSI or DBM specific endpoints).
 *
 * @ep - a pointer to some usb_ep instance
 *
 * @return int - 0 on success, negative on error.
 */
1450int msm_ep_config(struct usb_ep *ep)
1451{
1452 struct dwc3_ep *dep = to_dwc3_ep(ep);
1453 struct dwc3 *dwc = dep->dwc;
1454 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1455 struct usb_ep_ops *new_ep_ops;
1456
1457
1458 /* Save original ep ops for future restore*/
1459 if (mdwc->original_ep_ops[dep->number]) {
1460 dev_err(mdwc->dev,
1461 "ep [%s,%d] already configured as msm endpoint\n",
1462 ep->name, dep->number);
1463 return -EPERM;
1464 }
1465 mdwc->original_ep_ops[dep->number] = ep->ops;
1466
1467 /* Set new usb ops as we like */
1468 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1469 if (!new_ep_ops)
1470 return -ENOMEM;
1471
1472 (*new_ep_ops) = (*ep->ops);
1473 new_ep_ops->queue = dwc3_msm_ep_queue;
1474 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1475 ep->ops = new_ep_ops;
1476
1477 /*
1478 * Do HERE more usb endpoint configurations
1479 * which are specific to MSM.
1480 */
1481
1482 return 0;
1483}
1484EXPORT_SYMBOL(msm_ep_config);
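
/*
 * Illustrative (non-compiling) sketch of how a function driver might use the
 * MSM endpoint ops above; names such as 'fn_ep', 'fn_req' and 'bam_pipe_id'
 * are placeholders, not part of this driver:
 *
 *	ret = msm_ep_config(fn_ep);		// swap in MSM-specific ep ops
 *	fn_req->udc_priv = MSM_SPS_MODE | bam_pipe_id;
 *	ret = usb_ep_queue(fn_ep, fn_req, GFP_ATOMIC); // dwc3_msm_ep_queue()
 *	...
 *	ret = msm_ep_unconfig(fn_ep);		// restore original ep ops
 */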
1485
/**
 * Un-configure MSM endpoint.
 * Tears down the configuration done in the
 * msm_ep_config() function.
 *
 * @ep - a pointer to some usb_ep instance
 *
 * @return int - 0 on success, negative on error.
 */
1495int msm_ep_unconfig(struct usb_ep *ep)
1496{
1497 struct dwc3_ep *dep = to_dwc3_ep(ep);
1498 struct dwc3 *dwc = dep->dwc;
1499 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1500 struct usb_ep_ops *old_ep_ops;
1501
1502 /* Restore original ep ops */
1503 if (!mdwc->original_ep_ops[dep->number]) {
1504 dev_err(mdwc->dev,
1505 "ep [%s,%d] was not configured as msm endpoint\n",
1506 ep->name, dep->number);
1507 return -EINVAL;
1508 }
1509 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1510 ep->ops = mdwc->original_ep_ops[dep->number];
1511 mdwc->original_ep_ops[dep->number] = NULL;
1512 kfree(old_ep_ops);
1513
1514 /*
1515 * Do HERE more usb endpoint un-configurations
1516 * which are specific to MSM.
1517 */
1518
1519 return 0;
1520}
1521EXPORT_SYMBOL(msm_ep_unconfig);
1522#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1523
1524static void dwc3_resume_work(struct work_struct *w);
1525
1526static void dwc3_restart_usb_work(struct work_struct *w)
1527{
1528 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1529 restart_usb_work);
1530 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1531 unsigned int timeout = 50;
1532
1533 dev_dbg(mdwc->dev, "%s\n", __func__);
1534
1535 if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
1536 dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
1537 return;
1538 }
1539
1540 /* guard against concurrent VBUS handling */
1541 mdwc->in_restart = true;
1542
1543 if (!mdwc->vbus_active) {
1544 dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
1545 dwc->err_evt_seen = false;
1546 mdwc->in_restart = false;
1547 return;
1548 }
1549
Mayank Rana08e41922017-03-02 15:25:48 -08001550 dbg_event(0xFF, "RestartUSB", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07001551 /* Reset active USB connection */
1552 dwc3_resume_work(&mdwc->resume_work);
1553
1554 /* Make sure disconnect is processed before sending connect */
1555 while (--timeout && !pm_runtime_suspended(mdwc->dev))
1556 msleep(20);
1557
1558 if (!timeout) {
1559 dev_dbg(mdwc->dev,
1560 "Not in LPM after disconnect, forcing suspend...\n");
Mayank Rana08e41922017-03-02 15:25:48 -08001561 dbg_event(0xFF, "ReStart:RT SUSP",
1562 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07001563 pm_runtime_suspend(mdwc->dev);
1564 }
1565
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301566 mdwc->in_restart = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07001567 /* Force reconnect only if cable is still connected */
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301568 if (mdwc->vbus_active)
Mayank Rana511f3b22016-08-02 12:00:11 -07001569 dwc3_resume_work(&mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001570
1571 dwc->err_evt_seen = false;
1572 flush_delayed_work(&mdwc->sm_work);
1573}
1574
Manu Gautam976fdfc2016-08-18 09:27:35 +05301575static int msm_dwc3_usbdev_notify(struct notifier_block *self,
1576 unsigned long action, void *priv)
1577{
1578 struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
1579 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1580 struct usb_bus *bus = priv;
1581
1582 /* Interested only in recovery when HC dies */
1583 if (action != USB_BUS_DIED)
1584 return 0;
1585
1586 dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
1587 /* Recovery already under process */
1588 if (mdwc->hc_died)
1589 return 0;
1590
1591 if (bus->controller != &dwc->xhci->dev) {
1592 dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
1593 return 0;
1594 }
1595
1596 mdwc->hc_died = true;
1597 schedule_delayed_work(&mdwc->sm_work, 0);
1598 return 0;
1599}
1600
1601
Mayank Rana511f3b22016-08-02 12:00:11 -07001602/*
1603 * Check whether the DWC3 requires resetting the ep
1604 * after going to Low Power Mode (lpm)
1605 */
1606bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1607{
1608 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1609 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1610
1611 return dbm_reset_ep_after_lpm(mdwc->dbm);
1612}
1613EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1614
1615/*
1616 * Config Global Distributed Switch Controller (GDSC)
1617 * to support controller power collapse
1618 */
1619static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1620{
1621 int ret;
1622
1623 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1624 return -EPERM;
1625
1626 if (on) {
1627 ret = regulator_enable(mdwc->dwc3_gdsc);
1628 if (ret) {
1629 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1630 return ret;
1631 }
1632 } else {
1633 ret = regulator_disable(mdwc->dwc3_gdsc);
1634 if (ret) {
1635 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1636 return ret;
1637 }
1638 }
1639
1640 return ret;
1641}
1642
1643static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
1644{
1645 int ret = 0;
1646
1647 if (assert) {
Mayank Ranad339abe2017-05-31 09:19:49 -07001648 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001649 /* Using asynchronous block reset to the hardware */
1650 dev_dbg(mdwc->dev, "block_reset ASSERT\n");
1651 clk_disable_unprepare(mdwc->utmi_clk);
1652 clk_disable_unprepare(mdwc->sleep_clk);
1653 clk_disable_unprepare(mdwc->core_clk);
1654 clk_disable_unprepare(mdwc->iface_clk);
Amit Nischal4d278212016-06-06 17:54:34 +05301655 ret = reset_control_assert(mdwc->core_reset);
Mayank Rana511f3b22016-08-02 12:00:11 -07001656 if (ret)
Amit Nischal4d278212016-06-06 17:54:34 +05301657 dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001658 } else {
1659 dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
Amit Nischal4d278212016-06-06 17:54:34 +05301660 ret = reset_control_deassert(mdwc->core_reset);
1661 if (ret)
1662 dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001663 ndelay(200);
1664 clk_prepare_enable(mdwc->iface_clk);
1665 clk_prepare_enable(mdwc->core_clk);
1666 clk_prepare_enable(mdwc->sleep_clk);
1667 clk_prepare_enable(mdwc->utmi_clk);
Mayank Ranad339abe2017-05-31 09:19:49 -07001668 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001669 }
1670
1671 return ret;
1672}
1673
1674static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
1675{
1676 u32 guctl, gfladj = 0;
1677
1678 guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
1679 guctl &= ~DWC3_GUCTL_REFCLKPER;
1680
1681 /* GFLADJ register is used starting with revision 2.50a */
1682 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
1683 gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
1684 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1685 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
1686 gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
1687 gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
1688 }
1689
1690 /* Refer to SNPS Databook Table 6-55 for calculations used */
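	/*
	 * Sketch of how the values below follow from that table:
	 * REFCLKPER is the reference clock period in ns (1e9/19.2MHz ~= 52,
	 * 1e9/24MHz ~= 41); REFCLK_240MHZ_DECR is 240MHz divided by the
	 * reference clock rate (240/19.2 = 12.5, i.e. 12 plus the PLS1
	 * half-increment bit; 240/24 = 10); REFCLK_FLADJ carries the
	 * fractional frame-length adjustment from the same table.
	 */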
1691 switch (mdwc->utmi_clk_rate) {
1692 case 19200000:
1693 guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
1694 gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1695 gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1696 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1697 gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1698 break;
1699 case 24000000:
1700 guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
1701 gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1702 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1703 gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1704 break;
1705 default:
1706 dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
1707 mdwc->utmi_clk_rate);
1708 break;
1709 }
1710
1711 dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
1712 if (gfladj)
1713 dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
1714}
1715
1716/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
1717static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
1718{
1719 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
1720 /* On older cores set XHCI_REV bit to specify revision 1.0 */
1721 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1722 BIT(2), 1);
1723
1724 /*
1725 * Enable master clock for RAMs to allow BAM to access RAMs when
1726 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
1727 * are seen where RAM clocks get turned OFF in SS mode
1728 */
1729 dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
1730 dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
1731
1732}
1733
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001734static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1735{
1736 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1737 vbus_draw_work);
1738 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1739
1740 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1741}
1742
Mayank Rana511f3b22016-08-02 12:00:11 -07001743static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1744{
1745 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Ranaf4918d32016-12-15 13:35:55 -08001746 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001747 u32 reg;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001748 int i;
Mayank Rana511f3b22016-08-02 12:00:11 -07001749
1750 switch (event) {
1751 case DWC3_CONTROLLER_ERROR_EVENT:
1752 dev_info(mdwc->dev,
1753 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1754 dwc->irq_cnt);
1755
1756 dwc3_gadget_disable_irq(dwc);
1757
1758 /* prevent core from generating interrupts until recovery */
1759 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1760 reg |= DWC3_GCTL_CORESOFTRESET;
1761 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1762
1763 /* restart USB which performs full reset and reconnect */
1764 schedule_work(&mdwc->restart_usb_work);
1765 break;
1766 case DWC3_CONTROLLER_RESET_EVENT:
1767 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1768 /* HS & SSPHYs get reset as part of core soft reset */
1769 dwc3_msm_qscratch_reg_init(mdwc);
1770 break;
1771 case DWC3_CONTROLLER_POST_RESET_EVENT:
1772 dev_dbg(mdwc->dev,
1773 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1774
1775 /*
1776 * Below sequence is used when controller is working without
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301777 * having ssphy and only USB high/full speed is supported.
Mayank Rana511f3b22016-08-02 12:00:11 -07001778 */
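		/*
		 * Rough intent, inferred from the QSCRATCH bit names: gate the
		 * PIPE/UTMI clock, switch the pipe clock mux to the UTMI clock
		 * and take software control of PHYSTATUS, then ungate the
		 * clock so the core can run without a PIPE3 PHY.
		 */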
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301779 if (dwc->maximum_speed == USB_SPEED_HIGH ||
1780 dwc->maximum_speed == USB_SPEED_FULL) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001781 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1782 dwc3_msm_read_reg(mdwc->base,
1783 QSCRATCH_GENERAL_CFG)
1784 | PIPE_UTMI_CLK_DIS);
1785
1786 usleep_range(2, 5);
1787
1788
1789 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1790 dwc3_msm_read_reg(mdwc->base,
1791 QSCRATCH_GENERAL_CFG)
1792 | PIPE_UTMI_CLK_SEL
1793 | PIPE3_PHYSTATUS_SW);
1794
1795 usleep_range(2, 5);
1796
1797 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1798 dwc3_msm_read_reg(mdwc->base,
1799 QSCRATCH_GENERAL_CFG)
1800 & ~PIPE_UTMI_CLK_DIS);
1801 }
1802
1803 dwc3_msm_update_ref_clk(mdwc);
1804 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1805 break;
1806 case DWC3_CONTROLLER_CONNDONE_EVENT:
1807 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1808 /*
1809 * Add power event if the dbm indicates coming out of L1 by
1810 * interrupt
1811 */
1812 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1813 dwc3_msm_write_reg_field(mdwc->base,
1814 PWR_EVNT_IRQ_MASK_REG,
1815 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1816
1817 atomic_set(&dwc->in_lpm, 0);
1818 break;
1819 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1820 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1821 if (dwc->enable_bus_suspend) {
1822 mdwc->suspend = dwc->b_suspend;
1823 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1824 }
1825 break;
1826 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1827 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001828 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001829 break;
1830 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1831 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
Hemant Kumar43874172016-08-25 16:17:48 -07001832 schedule_work(&mdwc->restart_usb_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001833 break;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001834 case DWC3_GSI_EVT_BUF_ALLOC:
1835 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
1836
1837 if (!mdwc->num_gsi_event_buffers)
1838 break;
1839
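		/*
		 * Allocate one extra event buffer per GSI-capable interrupter;
		 * the SETUP step below programs them into GEVNTADR/GEVNTSIZ
		 * index (i+1), leaving index 0 for the core's own buffer.
		 */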
1840 mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
1841 sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
1842 GFP_KERNEL);
1843 if (!mdwc->gsi_ev_buff) {
1844 dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
1845 break;
1846 }
1847
1848 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1849
1850 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
1851 if (!evt)
1852 break;
1853 evt->dwc = dwc;
1854 evt->length = DWC3_EVENT_BUFFERS_SIZE;
1855 evt->buf = dma_alloc_coherent(dwc->dev,
1856 DWC3_EVENT_BUFFERS_SIZE,
1857 &evt->dma, GFP_KERNEL);
1858 if (!evt->buf) {
1859 dev_err(dwc->dev,
1860 "can't allocate gsi_evt_buf(%d)\n", i);
1861 break;
1862 }
1863 mdwc->gsi_ev_buff[i] = evt;
1864 }
1865 break;
1866 case DWC3_GSI_EVT_BUF_SETUP:
1867 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
1868 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1869 evt = mdwc->gsi_ev_buff[i];
Mayank Rana0eb0db72017-10-03 13:46:32 -07001870 if (!evt)
1871 break;
1872
Mayank Ranaf4918d32016-12-15 13:35:55 -08001873 dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
1874 evt->buf, (unsigned long long) evt->dma,
1875 evt->length);
1876 memset(evt->buf, 0, evt->length);
1877 evt->lpos = 0;
1878 /*
1879 * Primary event buffer is programmed with registers
1880 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1881 * program USB GSI related event buffer with DWC3
1882 * controller.
1883 */
1884 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
1885 lower_32_bits(evt->dma));
1886 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
1887 DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
1888 DWC3_GEVENT_TYPE_GSI) |
1889 DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
1890 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1891 DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
1892 ((evt->length) & 0xffff));
1893 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1894 }
1895 break;
1896 case DWC3_GSI_EVT_BUF_CLEANUP:
1897 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
Mayank Rana0eb0db72017-10-03 13:46:32 -07001898 if (!mdwc->gsi_ev_buff)
1899 break;
1900
Mayank Ranaf4918d32016-12-15 13:35:55 -08001901 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1902 evt = mdwc->gsi_ev_buff[i];
1903 evt->lpos = 0;
1904 /*
1905 * Primary event buffer is programmed with registers
1906 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1907 * program USB GSI related event buffer with DWC3
1908 * controller.
1909 */
1910 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
1911 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
1912 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1913 DWC3_GEVNTSIZ_INTMASK |
1914 DWC3_GEVNTSIZ_SIZE((i+1)));
1915 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1916 }
1917 break;
1918 case DWC3_GSI_EVT_BUF_FREE:
1919 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
Mayank Rana0eb0db72017-10-03 13:46:32 -07001920 if (!mdwc->gsi_ev_buff)
1921 break;
1922
Mayank Ranaf4918d32016-12-15 13:35:55 -08001923 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1924 evt = mdwc->gsi_ev_buff[i];
1925 if (evt)
1926 dma_free_coherent(dwc->dev, evt->length,
1927 evt->buf, evt->dma);
1928 }
1929 break;
Mayank Rana511f3b22016-08-02 12:00:11 -07001930 default:
1931 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1932 break;
1933 }
1934}
1935
1936static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
1937{
1938 int ret = 0;
1939
1940 if (core_reset) {
1941 ret = dwc3_msm_link_clk_reset(mdwc, 1);
1942 if (ret)
1943 return;
1944
1945 usleep_range(1000, 1200);
1946 ret = dwc3_msm_link_clk_reset(mdwc, 0);
1947 if (ret)
1948 return;
1949
1950 usleep_range(10000, 12000);
1951 }
1952
1953 if (mdwc->dbm) {
1954 /* Reset the DBM */
1955 dbm_soft_reset(mdwc->dbm, 1);
1956 usleep_range(1000, 1200);
1957 dbm_soft_reset(mdwc->dbm, 0);
1958
1959 /*enable DBM*/
1960 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1961 DBM_EN_MASK, 0x1);
1962 dbm_enable(mdwc->dbm);
1963 }
1964}
1965
1966static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
1967{
1968 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1969 u32 val;
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301970 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001971
1972 /* Configure AHB2PHY for one wait state read/write */
1973 if (mdwc->ahb2phy_base) {
1974 clk_prepare_enable(mdwc->cfg_ahb_clk);
1975 val = readl_relaxed(mdwc->ahb2phy_base +
1976 PERIPH_SS_AHB2PHY_TOP_CFG);
1977 if (val != ONE_READ_WRITE_WAIT) {
1978 writel_relaxed(ONE_READ_WRITE_WAIT,
1979 mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
1980 /* complete above write before configuring USB PHY. */
1981 mb();
1982 }
1983 clk_disable_unprepare(mdwc->cfg_ahb_clk);
1984 }
1985
1986 if (!mdwc->init) {
Mayank Rana08e41922017-03-02 15:25:48 -08001987 dbg_event(0xFF, "dwc3 init",
1988 atomic_read(&mdwc->dev->power.usage_count));
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301989 ret = dwc3_core_pre_init(dwc);
1990 if (ret) {
1991 dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
1992 return;
1993 }
Mayank Rana511f3b22016-08-02 12:00:11 -07001994 mdwc->init = true;
1995 }
1996
1997 dwc3_core_init(dwc);
1998 /* Re-configure event buffers */
1999 dwc3_event_buffers_setup(dwc);
2000}
2001
2002static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
2003{
2004 unsigned long timeout;
2005 u32 reg = 0;
2006
2007 if ((mdwc->in_host_mode || mdwc->vbus_active)
Vijayavardhan Vennapusa8cf91a62016-09-01 12:05:50 +05302008 && dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002009 if (!atomic_read(&mdwc->in_p3)) {
2010			dev_err(mdwc->dev, "Not in P3, aborting LPM sequence\n");
2011 return -EBUSY;
2012 }
2013 }
2014
2015 /* Clear previous L2 events */
2016 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2017 PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
2018
2019 /* Prepare HSPHY for suspend */
2020 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
2021 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2022 reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
2023
2024 /* Wait for PHY to go into L2 */
2025 timeout = jiffies + msecs_to_jiffies(5);
2026 while (!time_after(jiffies, timeout)) {
2027 reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2028 if (reg & PWR_EVNT_LPM_IN_L2_MASK)
2029 break;
2030 }
2031 if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
2032 dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
2033
2034 /* Clear L2 event bit */
2035 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2036 PWR_EVNT_LPM_IN_L2_MASK);
2037
2038 return 0;
2039}
2040
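/*
 * Record the speed of the current connection in the HS PHY flags
 * (PHY_HSFS_MODE/PHY_LS_MODE) so the wakeup interrupt polarity can be
 * chosen accordingly before entering low power mode.
 */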
Mayank Rana511f3b22016-08-02 12:00:11 -07002041static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
2042{
2043 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2044 int i, num_ports;
2045 u32 reg;
2046
2047 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2048 if (mdwc->in_host_mode) {
2049 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
2050 num_ports = HCS_MAX_PORTS(reg);
2051 for (i = 0; i < num_ports; i++) {
2052 reg = dwc3_msm_read_reg(mdwc->base,
2053 USB3_PORTSC + i*0x10);
2054 if (reg & PORT_PE) {
2055 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
2056 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2057 else if (DEV_LOWSPEED(reg))
2058 mdwc->hs_phy->flags |= PHY_LS_MODE;
2059 }
2060 }
2061 } else {
2062 if (dwc->gadget.speed == USB_SPEED_HIGH ||
2063 dwc->gadget.speed == USB_SPEED_FULL)
2064 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2065 else if (dwc->gadget.speed == USB_SPEED_LOW)
2066 mdwc->hs_phy->flags |= PHY_LS_MODE;
2067 }
2068}
2069
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302070static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
2071 bool perf_mode);
Mayank Rana511f3b22016-08-02 12:00:11 -07002072
Mayank Ranad339abe2017-05-31 09:19:49 -07002073static void configure_usb_wakeup_interrupt(struct dwc3_msm *mdwc,
2074 struct usb_irq *uirq, unsigned int polarity, bool enable)
2075{
2076 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2077
2078 if (uirq && enable && !uirq->enable) {
2079 dbg_event(0xFF, "PDC_IRQ_EN", uirq->irq);
2080 dbg_event(0xFF, "PDC_IRQ_POL", polarity);
2081 /* clear any pending interrupt */
2082 irq_set_irqchip_state(uirq->irq, IRQCHIP_STATE_PENDING, 0);
2083 irq_set_irq_type(uirq->irq, polarity);
2084 enable_irq_wake(uirq->irq);
2085 enable_irq(uirq->irq);
2086 uirq->enable = true;
2087 }
2088
2089 if (uirq && !enable && uirq->enable) {
2090 dbg_event(0xFF, "PDC_IRQ_DIS", uirq->irq);
2091 disable_irq_wake(uirq->irq);
2092 disable_irq_nosync(uirq->irq);
2093 uirq->enable = false;
2094 }
2095}
2096
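/*
 * DP/DM wakeup polarity, roughly: in LS the idle (J) state holds D- high,
 * so a falling edge on DM signals bus activity; in FS/HS idle holds D+
 * high, so DP falling is used; with nothing attached both lines sit at
 * SE0, so rising edges on DP/DM catch a new connection.
 */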
2097static void enable_usb_pdc_interrupt(struct dwc3_msm *mdwc, bool enable)
2098{
2099 if (!enable)
2100 goto disable_usb_irq;
2101
2102 if (mdwc->hs_phy->flags & PHY_LS_MODE) {
2103 configure_usb_wakeup_interrupt(mdwc,
2104 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2105 IRQ_TYPE_EDGE_FALLING, enable);
2106 } else if (mdwc->hs_phy->flags & PHY_HSFS_MODE) {
2107 configure_usb_wakeup_interrupt(mdwc,
2108 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2109 IRQ_TYPE_EDGE_FALLING, enable);
2110 } else {
2111 configure_usb_wakeup_interrupt(mdwc,
2112 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2113 IRQ_TYPE_EDGE_RISING, true);
2114 configure_usb_wakeup_interrupt(mdwc,
2115 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2116 IRQ_TYPE_EDGE_RISING, true);
2117 }
2118
2119 configure_usb_wakeup_interrupt(mdwc,
2120 &mdwc->wakeup_irq[SS_PHY_IRQ],
2121 IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH, enable);
2122 return;
2123
2124disable_usb_irq:
2125 configure_usb_wakeup_interrupt(mdwc,
2126 &mdwc->wakeup_irq[DP_HS_PHY_IRQ], 0, enable);
2127 configure_usb_wakeup_interrupt(mdwc,
2128 &mdwc->wakeup_irq[DM_HS_PHY_IRQ], 0, enable);
2129 configure_usb_wakeup_interrupt(mdwc,
2130 &mdwc->wakeup_irq[SS_PHY_IRQ], 0, enable);
2131}
2132
2133static void configure_nonpdc_usb_interrupt(struct dwc3_msm *mdwc,
2134 struct usb_irq *uirq, bool enable)
2135{
2136 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2137
2138 if (uirq && enable && !uirq->enable) {
2139 dbg_event(0xFF, "IRQ_EN", uirq->irq);
2140 enable_irq_wake(uirq->irq);
2141 enable_irq(uirq->irq);
2142 uirq->enable = true;
2143 }
2144
2145 if (uirq && !enable && uirq->enable) {
2146 dbg_event(0xFF, "IRQ_DIS", uirq->irq);
2147 disable_irq_wake(uirq->irq);
2148 disable_irq_nosync(uirq->irq);
2149		uirq->enable = false;
2150 }
2151}
2152
Mayank Rana511f3b22016-08-02 12:00:11 -07002153static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
2154{
Mayank Rana83ad5822016-08-09 14:17:22 -07002155 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07002156 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana83ad5822016-08-09 14:17:22 -07002157 struct dwc3_event_buffer *evt;
Mayank Ranad339abe2017-05-31 09:19:49 -07002158 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002159
2160 if (atomic_read(&dwc->in_lpm)) {
2161 dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
2162 return 0;
2163 }
2164
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302165 cancel_delayed_work_sync(&mdwc->perf_vote_work);
2166 msm_dwc3_perf_vote_update(mdwc, false);
2167
Mayank Rana511f3b22016-08-02 12:00:11 -07002168 if (!mdwc->in_host_mode) {
Mayank Rana83ad5822016-08-09 14:17:22 -07002169 evt = dwc->ev_buf;
2170 if ((evt->flags & DWC3_EVENT_PENDING)) {
2171 dev_dbg(mdwc->dev,
Mayank Rana511f3b22016-08-02 12:00:11 -07002172 "%s: %d device events pending, abort suspend\n",
2173 __func__, evt->count / 4);
Mayank Rana83ad5822016-08-09 14:17:22 -07002174 return -EBUSY;
Mayank Rana511f3b22016-08-02 12:00:11 -07002175 }
2176 }
2177
2178 if (!mdwc->vbus_active && dwc->is_drd &&
2179 mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
2180 /*
2181 * In some cases, the pm_runtime_suspend may be called by
2182 * usb_bam when there is pending lpm flag. However, if this is
2183 * done when cable was disconnected and otg state has not
2184 * yet changed to IDLE, then it means OTG state machine
2185 * is running and we race against it. So cancel LPM for now,
2186 * and OTG state machine will go for LPM later, after completing
2187 * transition to IDLE state.
2188 */
2189 dev_dbg(mdwc->dev,
2190 "%s: cable disconnected while not in idle otg state\n",
2191 __func__);
2192 return -EBUSY;
2193 }
2194
2195 /*
2196 * Check if device is not in CONFIGURED state
2197 * then check controller state of L2 and break
2198 * LPM sequence. Check this for device bus suspend case.
2199 */
2200 if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
2201 (dwc->gadget.state != USB_STATE_CONFIGURED)) {
2202 pr_err("%s(): Trying to go in LPM with state:%d\n",
2203 __func__, dwc->gadget.state);
2204 pr_err("%s(): LPM is not performed.\n", __func__);
2205 return -EBUSY;
2206 }
2207
2208 ret = dwc3_msm_prepare_suspend(mdwc);
2209 if (ret)
2210 return ret;
2211
Mayank Rana511f3b22016-08-02 12:00:11 -07002212 /* Disable core irq */
2213 if (dwc->irq)
2214 disable_irq(dwc->irq);
2215
Mayank Ranaf616a7f2017-03-20 16:10:39 -07002216 if (work_busy(&dwc->bh_work))
2217 dbg_event(0xFF, "pend evt", 0);
2218
Mayank Rana511f3b22016-08-02 12:00:11 -07002219 /* disable power event irq, hs and ss phy irq is used as wake up src */
Mayank Ranad339abe2017-05-31 09:19:49 -07002220 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07002221
2222 dwc3_set_phy_speed_flags(mdwc);
2223 /* Suspend HS PHY */
2224 usb_phy_set_suspend(mdwc->hs_phy, 1);
2225
2226 /* Suspend SS PHY */
Mayank Rana17f67e32017-08-15 10:41:28 -07002227 if (dwc->maximum_speed == USB_SPEED_SUPER) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002228 /* indicate phy about SS mode */
2229 if (dwc3_msm_is_superspeed(mdwc))
2230 mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
2231 usb_phy_set_suspend(mdwc->ss_phy, 1);
2232 mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
2233 }
2234
2235 /* make sure above writes are completed before turning off clocks */
2236 wmb();
2237
2238 /* Disable clocks */
2239 if (mdwc->bus_aggr_clk)
2240 clk_disable_unprepare(mdwc->bus_aggr_clk);
2241 clk_disable_unprepare(mdwc->utmi_clk);
2242
Hemant Kumar633dc332016-08-10 13:41:05 -07002243 /* Memory core: OFF, Memory periphery: OFF */
2244 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
2245 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
2246 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
2247 }
2248
Mayank Rana511f3b22016-08-02 12:00:11 -07002249 clk_set_rate(mdwc->core_clk, 19200000);
2250 clk_disable_unprepare(mdwc->core_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302251 if (mdwc->noc_aggr_clk)
2252 clk_disable_unprepare(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07002253 /*
2254 * Disable iface_clk only after core_clk as core_clk has FSM
2255	 * dependency on iface_clk. Hence iface_clk should be turned off
2256 * after core_clk is turned off.
2257 */
2258 clk_disable_unprepare(mdwc->iface_clk);
2259 /* USB PHY no more requires TCXO */
2260 clk_disable_unprepare(mdwc->xo_clk);
2261
2262 /* Perform controller power collapse */
Azhar Shaikh69f4c052016-02-11 11:00:58 -08002263 if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002264 mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
2265 dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
2266 dwc3_msm_config_gdsc(mdwc, 0);
2267 clk_disable_unprepare(mdwc->sleep_clk);
Jack Phambbe27962017-03-23 18:42:26 -07002268
Jack Pham9faa51df2017-04-03 18:13:40 -07002269 if (mdwc->iommu_map) {
Jack Phambbe27962017-03-23 18:42:26 -07002270 arm_iommu_detach_device(mdwc->dev);
Jack Pham9faa51df2017-04-03 18:13:40 -07002271 dev_dbg(mdwc->dev, "IOMMU detached\n");
2272 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002273 }
2274
2275 /* Remove bus voting */
2276 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002277 dbg_event(0xFF, "bus_devote_start", 0);
2278 ret = msm_bus_scale_client_update_request(
2279 mdwc->bus_perf_client, 0);
2280 dbg_event(0xFF, "bus_devote_finish", 0);
2281 if (ret)
2282 dev_err(mdwc->dev, "bus bw unvoting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002283 }
2284
2285 /*
2286 * release wakeup source with timeout to defer system suspend to
2287 * handle case where on USB cable disconnect, SUSPEND and DISCONNECT
2288 * event is received.
2289 */
2290 if (mdwc->lpm_to_suspend_delay) {
2291 dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
2292 mdwc->lpm_to_suspend_delay);
2293 pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
2294 } else {
2295 pm_relax(mdwc->dev);
2296 }
2297
2298 atomic_set(&dwc->in_lpm, 1);
2299
2300 /*
2301	 * with DCP or during cable disconnect, we don't require wakeup
2302 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
2303 * case of host bus suspend and device bus suspend.
2304 */
2305 if (mdwc->vbus_active || mdwc->in_host_mode) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002306 if (mdwc->use_pdc_interrupts) {
2307 enable_usb_pdc_interrupt(mdwc, true);
2308 } else {
2309 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2310 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
2311 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2312 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
Mayank Rana511f3b22016-08-02 12:00:11 -07002313 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002314 mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2315 }
2316
2317 dev_info(mdwc->dev, "DWC3 in low power mode\n");
2318 return 0;
2319}
2320
2321static int dwc3_msm_resume(struct dwc3_msm *mdwc)
2322{
2323 int ret;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002324 long core_clk_rate;
Mayank Rana511f3b22016-08-02 12:00:11 -07002325 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Ranad339abe2017-05-31 09:19:49 -07002326 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002327
2328 dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
2329
2330 if (!atomic_read(&dwc->in_lpm)) {
2331 dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
2332 return 0;
2333 }
2334
2335 pm_stay_awake(mdwc->dev);
2336
2337 /* Enable bus voting */
2338 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002339 dbg_event(0xFF, "bus_vote_start", 1);
2340 ret = msm_bus_scale_client_update_request(
2341 mdwc->bus_perf_client, 1);
2342 dbg_event(0xFF, "bus_vote_finish", 1);
2343 if (ret)
2344 dev_err(mdwc->dev, "bus bw voting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002345 }
2346
2347 /* Vote for TCXO while waking up USB HSPHY */
2348 ret = clk_prepare_enable(mdwc->xo_clk);
2349 if (ret)
2350 dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
2351 __func__, ret);
2352
2353 /* Restore controller power collapse */
2354 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2355 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2356 dwc3_msm_config_gdsc(mdwc, 1);
Amit Nischal4d278212016-06-06 17:54:34 +05302357 ret = reset_control_assert(mdwc->core_reset);
2358 if (ret)
2359 dev_err(mdwc->dev, "%s:core_reset assert failed\n",
2360 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002361 /* HW requires a short delay for reset to take place properly */
2362 usleep_range(1000, 1200);
Amit Nischal4d278212016-06-06 17:54:34 +05302363 ret = reset_control_deassert(mdwc->core_reset);
2364 if (ret)
2365 dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
2366 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002367 clk_prepare_enable(mdwc->sleep_clk);
2368 }
2369
2370 /*
2371 * Enable clocks
2372	 * Turn ON iface_clk before core_clk due to FSM dependency.
2373 */
2374 clk_prepare_enable(mdwc->iface_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302375 if (mdwc->noc_aggr_clk)
2376 clk_prepare_enable(mdwc->noc_aggr_clk);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002377
2378 core_clk_rate = mdwc->core_clk_rate;
2379 if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
2380 core_clk_rate = mdwc->core_clk_rate_hs;
2381 dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
2382 core_clk_rate);
2383 }
2384
2385 clk_set_rate(mdwc->core_clk, core_clk_rate);
Mayank Rana511f3b22016-08-02 12:00:11 -07002386 clk_prepare_enable(mdwc->core_clk);
Hemant Kumar5fa38932016-10-27 11:58:37 -07002387
2388 /* set Memory core: ON, Memory periphery: ON */
2389 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
2390 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);
2391
Mayank Rana511f3b22016-08-02 12:00:11 -07002392 clk_prepare_enable(mdwc->utmi_clk);
2393 if (mdwc->bus_aggr_clk)
2394 clk_prepare_enable(mdwc->bus_aggr_clk);
2395
2396 /* Resume SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002397 if (dwc->maximum_speed == USB_SPEED_SUPER &&
2398 mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002399 mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
2400 if (mdwc->typec_orientation == ORIENTATION_CC1)
2401 mdwc->ss_phy->flags |= PHY_LANE_A;
2402 if (mdwc->typec_orientation == ORIENTATION_CC2)
2403 mdwc->ss_phy->flags |= PHY_LANE_B;
2404 usb_phy_set_suspend(mdwc->ss_phy, 0);
2405 mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
2406 mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
2407 }
2408
2409 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2410 /* Resume HS PHY */
2411 usb_phy_set_suspend(mdwc->hs_phy, 0);
2412
2413 /* Recover from controller power collapse */
2414 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2415 u32 tmp;
2416
Jack Pham9faa51df2017-04-03 18:13:40 -07002417 if (mdwc->iommu_map) {
2418 ret = arm_iommu_attach_device(mdwc->dev,
2419 mdwc->iommu_map);
2420 if (ret)
2421 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
2422 ret);
2423 else
2424 dev_dbg(mdwc->dev, "attached to IOMMU\n");
2425 }
2426
Mayank Rana511f3b22016-08-02 12:00:11 -07002427 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2428
2429 dwc3_msm_power_collapse_por(mdwc);
2430
2431 /* Get initial P3 status and enable IN_P3 event */
2432 tmp = dwc3_msm_read_reg_field(mdwc->base,
2433 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2434 atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
2435 dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
2436 PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
2437
2438 mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
2439 }
2440
2441 atomic_set(&dwc->in_lpm, 0);
2442
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302443 /* enable power evt irq for IN P3 detection */
Mayank Ranad339abe2017-05-31 09:19:49 -07002444 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302445
Mayank Rana511f3b22016-08-02 12:00:11 -07002446 /* Disable HSPHY auto suspend */
2447 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2448 dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
2449 ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
2450 DWC3_GUSB2PHYCFG_SUSPHY));
2451
2452 /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
2453 if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002454 if (mdwc->use_pdc_interrupts) {
2455 enable_usb_pdc_interrupt(mdwc, false);
2456 } else {
2457 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2458 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
2459 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2460 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07002461 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002462 mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2463 }
2464
2465 dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
2466
Mayank Rana511f3b22016-08-02 12:00:11 -07002467 /* Enable core irq */
2468 if (dwc->irq)
2469 enable_irq(dwc->irq);
2470
2471 /*
2472 * Handle other power events that could not have been handled during
2473 * Low Power Mode
2474 */
2475 dwc3_pwr_event_handler(mdwc);
2476
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302477 if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
2478 schedule_delayed_work(&mdwc->perf_vote_work,
2479 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
2480
Mayank Rana08e41922017-03-02 15:25:48 -08002481 dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002482 return 0;
2483}
2484
2485/**
2486 * dwc3_ext_event_notify - callback to handle events from external transceiver
2487 *
2488 * Updates OTG inputs from ID/VBUS/suspend status and schedules sm_work
2489 */
2490static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2491{
2492 /* Flush processing any pending events before handling new ones */
2493 flush_delayed_work(&mdwc->sm_work);
2494
2495 if (mdwc->id_state == DWC3_ID_FLOAT) {
2496 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2497 set_bit(ID, &mdwc->inputs);
2498 } else {
2499 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2500 clear_bit(ID, &mdwc->inputs);
2501 }
2502
2503 if (mdwc->vbus_active && !mdwc->in_restart) {
2504 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2505 set_bit(B_SESS_VLD, &mdwc->inputs);
2506 } else {
2507 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2508 clear_bit(B_SESS_VLD, &mdwc->inputs);
2509 }
2510
2511 if (mdwc->suspend) {
2512 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2513 set_bit(B_SUSPEND, &mdwc->inputs);
2514 } else {
2515 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2516 clear_bit(B_SUSPEND, &mdwc->inputs);
2517 }
2518
2519 schedule_delayed_work(&mdwc->sm_work, 0);
2520}
2521
2522static void dwc3_resume_work(struct work_struct *w)
2523{
2524 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana08e41922017-03-02 15:25:48 -08002525 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Jack Pham4e9dff72017-04-04 18:05:53 -07002526 union extcon_property_value val;
2527 unsigned int extcon_id;
2528 struct extcon_dev *edev = NULL;
2529 int ret = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07002530
2531 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2532
Jack Pham4e9dff72017-04-04 18:05:53 -07002533 if (mdwc->vbus_active) {
2534 edev = mdwc->extcon_vbus;
2535 extcon_id = EXTCON_USB;
2536 } else if (mdwc->id_state == DWC3_ID_GROUND) {
2537 edev = mdwc->extcon_id;
2538 extcon_id = EXTCON_USB_HOST;
2539 }
2540
2541 /* Check speed and Type-C polarity values in order to configure PHY */
2542 if (edev && extcon_get_state(edev, extcon_id)) {
2543 ret = extcon_get_property(edev, extcon_id,
2544 EXTCON_PROP_USB_SS, &val);
2545
2546 /* Use default dwc->maximum_speed if speed isn't reported */
2547 if (!ret)
2548 dwc->maximum_speed = (val.intval == 0) ?
2549 USB_SPEED_HIGH : USB_SPEED_SUPER;
2550
2551 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2552 dwc->maximum_speed = dwc->max_hw_supp_speed;
2553
Mayank Ranaf70d8212017-06-12 14:02:07 -07002554 if (override_usb_speed &&
2555 is_valid_usb_speed(dwc, override_usb_speed)) {
2556 dwc->maximum_speed = override_usb_speed;
2557 dbg_event(0xFF, "override_speed", override_usb_speed);
2558 }
2559
Jack Pham4e9dff72017-04-04 18:05:53 -07002560 dbg_event(0xFF, "speed", dwc->maximum_speed);
2561
2562 ret = extcon_get_property(edev, extcon_id,
2563 EXTCON_PROP_USB_TYPEC_POLARITY, &val);
2564 if (ret)
2565 mdwc->typec_orientation = ORIENTATION_NONE;
2566 else
2567 mdwc->typec_orientation = val.intval ?
2568 ORIENTATION_CC2 : ORIENTATION_CC1;
2569
2570 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
2571 }
2572
Mayank Rana511f3b22016-08-02 12:00:11 -07002573 /*
2574 * exit LPM first to meet resume timeline from device side.
2575 * resume_pending flag would prevent calling
2576 * dwc3_msm_resume() in case we are here due to system
2577 * wide resume without usb cable connected. This flag is set
2578 * only in case of power event irq in lpm.
2579 */
2580 if (mdwc->resume_pending) {
2581 dwc3_msm_resume(mdwc);
2582 mdwc->resume_pending = false;
2583 }
2584
Mayank Rana08e41922017-03-02 15:25:48 -08002585 if (atomic_read(&mdwc->pm_suspended)) {
2586 dbg_event(0xFF, "RWrk PMSus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07002587 /* let pm resume kick in resume work later */
2588 return;
Mayank Rana08e41922017-03-02 15:25:48 -08002589 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002590 dwc3_ext_event_notify(mdwc);
2591}
2592
2593static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2594{
2595 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2596 u32 irq_stat, irq_clear = 0;
2597
2598 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2599 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2600
2601 /* Check for P3 events */
2602 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2603 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2604		/* Can't tell if we entered or exited P3, so check LINKSTATE */
2605 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2606 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2607 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2608 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2609
2610 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2611 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2612 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2613 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2614 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2615 atomic_set(&mdwc->in_p3, 0);
2616 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2617 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2618 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2619 atomic_set(&mdwc->in_p3, 1);
2620 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2621 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2622 }
2623
2624 /* Clear L2 exit */
2625 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2626 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
2627		irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
2628 }
2629
2630 /* Handle exit from L1 events */
2631 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2632 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2633 __func__);
2634 if (usb_gadget_wakeup(&dwc->gadget))
2635 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2636 __func__);
2637 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2638 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2639 }
2640
2641 /* Unhandled events */
2642 if (irq_stat)
2643 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2644 __func__, irq_stat);
2645
2646 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2647}
2648
2649static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2650{
2651 struct dwc3_msm *mdwc = _mdwc;
2652 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2653
2654 dev_dbg(mdwc->dev, "%s\n", __func__);
2655
2656 if (atomic_read(&dwc->in_lpm))
2657 dwc3_resume_work(&mdwc->resume_work);
2658 else
2659 dwc3_pwr_event_handler(mdwc);
2660
Mayank Rana08e41922017-03-02 15:25:48 -08002661 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002662 return IRQ_HANDLED;
2663}
2664
2665static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2666{
2667 struct dwc3_msm *mdwc = data;
2668 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2669
2670 dwc->t_pwr_evt_irq = ktime_get();
2671 dev_dbg(mdwc->dev, "%s received\n", __func__);
2672 /*
2673	 * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to ascertain
2674 * which interrupts have been triggered, as the clocks are disabled.
2675	 * Resume controller by waking up pwr event irq thread. After re-enabling
2676 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2677 * all other power events.
2678 */
2679 if (atomic_read(&dwc->in_lpm)) {
2680 /* set this to call dwc3_msm_resume() */
2681 mdwc->resume_pending = true;
2682 return IRQ_WAKE_THREAD;
2683 }
2684
2685 dwc3_pwr_event_handler(mdwc);
2686 return IRQ_HANDLED;
2687}
2688
2689static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2690 unsigned long action, void *hcpu)
2691{
2692 uint32_t cpu = (uintptr_t)hcpu;
2693 struct dwc3_msm *mdwc =
2694 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2695
2696 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2697 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2698 cpu_to_affin, mdwc->irq_to_affin);
2699 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2700 }
2701
2702 return NOTIFY_OK;
2703}
2704
2705static void dwc3_otg_sm_work(struct work_struct *w);
2706
2707static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
2708{
2709 int ret;
2710
2711 mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
2712 if (IS_ERR(mdwc->dwc3_gdsc))
2713 mdwc->dwc3_gdsc = NULL;
2714
2715 mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
2716 if (IS_ERR(mdwc->xo_clk)) {
2717 dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
2718 __func__);
2719 ret = PTR_ERR(mdwc->xo_clk);
2720 return ret;
2721 }
2722 clk_set_rate(mdwc->xo_clk, 19200000);
2723
2724 mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
2725 if (IS_ERR(mdwc->iface_clk)) {
2726 dev_err(mdwc->dev, "failed to get iface_clk\n");
2727 ret = PTR_ERR(mdwc->iface_clk);
2728 return ret;
2729 }
2730
2731 /*
2732 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
2733	 * run at 125MHz in SSUSB mode and >60MHz for HSUSB mode.
2734 * On newer platform it can run at 150MHz as well.
2735 */
2736 mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
2737 if (IS_ERR(mdwc->core_clk)) {
2738 dev_err(mdwc->dev, "failed to get core_clk\n");
2739 ret = PTR_ERR(mdwc->core_clk);
2740 return ret;
2741 }
2742
Amit Nischal4d278212016-06-06 17:54:34 +05302743 mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
2744 if (IS_ERR(mdwc->core_reset)) {
2745 dev_err(mdwc->dev, "failed to get core_reset\n");
2746 return PTR_ERR(mdwc->core_reset);
2747 }
2748
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302749 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302750 (u32 *)&mdwc->core_clk_rate)) {
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302751 dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
2752 return -EINVAL;
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302753 }
2754
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302755 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302756 mdwc->core_clk_rate);
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302757 dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
2758 mdwc->core_clk_rate);
2759 ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2760 if (ret)
2761 dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002762
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002763 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
2764 (u32 *)&mdwc->core_clk_rate_hs)) {
2765 dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
2766 mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
2767 }
2768
Mayank Rana511f3b22016-08-02 12:00:11 -07002769 mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
2770 if (IS_ERR(mdwc->sleep_clk)) {
2771 dev_err(mdwc->dev, "failed to get sleep_clk\n");
2772 ret = PTR_ERR(mdwc->sleep_clk);
2773 return ret;
2774 }
2775
2776 clk_set_rate(mdwc->sleep_clk, 32000);
2777 mdwc->utmi_clk_rate = 19200000;
2778 mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
2779 if (IS_ERR(mdwc->utmi_clk)) {
2780 dev_err(mdwc->dev, "failed to get utmi_clk\n");
2781 ret = PTR_ERR(mdwc->utmi_clk);
2782 return ret;
2783 }
2784
2785 clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
2786 mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
2787 if (IS_ERR(mdwc->bus_aggr_clk))
2788 mdwc->bus_aggr_clk = NULL;
2789
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302790 mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
2791 if (IS_ERR(mdwc->noc_aggr_clk))
2792 mdwc->noc_aggr_clk = NULL;
2793
Mayank Rana511f3b22016-08-02 12:00:11 -07002794 if (of_property_match_string(mdwc->dev->of_node,
2795 "clock-names", "cfg_ahb_clk") >= 0) {
2796 mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
2797 if (IS_ERR(mdwc->cfg_ahb_clk)) {
2798 ret = PTR_ERR(mdwc->cfg_ahb_clk);
2799 mdwc->cfg_ahb_clk = NULL;
2800 if (ret != -EPROBE_DEFER)
2801 dev_err(mdwc->dev,
2802 "failed to get cfg_ahb_clk ret %d\n",
2803 ret);
2804 return ret;
2805 }
2806 }
2807
2808 return 0;
2809}
2810
2811static int dwc3_msm_id_notifier(struct notifier_block *nb,
2812 unsigned long event, void *ptr)
2813{
2814 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002815 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002816 enum dwc3_id_state id;
Mayank Rana511f3b22016-08-02 12:00:11 -07002817
2818 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2819
2820 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2821
Mayank Rana511f3b22016-08-02 12:00:11 -07002822 if (mdwc->id_state != id) {
2823 mdwc->id_state = id;
Mayank Rana08e41922017-03-02 15:25:48 -08002824 dbg_event(0xFF, "id_state", mdwc->id_state);
Mayank Rana511f3b22016-08-02 12:00:11 -07002825 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2826 }
2827
Mayank Rana511f3b22016-08-02 12:00:11 -07002828 return NOTIFY_DONE;
2829}
2830
Hemant Kumar006fae42017-07-12 18:11:25 -07002831
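/*
 * Runs a while after an SDP connection is detected (scheduled elsewhere in
 * this driver): if the gadget has still not reached the DEFAULT state and
 * the link is not in compliance mode, treat D+/D- as floating and drop the
 * session by clearing vbus_active.
 */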
2832static void check_for_sdp_connection(struct work_struct *w)
2833{
Hemant Kumar006fae42017-07-12 18:11:25 -07002834 struct dwc3_msm *mdwc =
2835 container_of(w, struct dwc3_msm, sdp_check.work);
2836 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2837
2838 if (!mdwc->vbus_active)
2839 return;
2840
2841 /* floating D+/D- lines detected */
2842 if (dwc->gadget.state < USB_STATE_DEFAULT &&
2843 dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
Hemant Kumar006fae42017-07-12 18:11:25 -07002844 mdwc->vbus_active = 0;
2845 dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
2846 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2847 }
2848}
2849
Mayank Rana511f3b22016-08-02 12:00:11 -07002850static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2851 unsigned long event, void *ptr)
2852{
2853 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2854 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002855
2856 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2857
2858 if (mdwc->vbus_active == event)
2859 return NOTIFY_DONE;
2860
Mayank Rana511f3b22016-08-02 12:00:11 -07002861 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002862 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002863 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002864
Mayank Rana511f3b22016-08-02 12:00:11 -07002865 return NOTIFY_DONE;
2866}
Jack Pham4e9dff72017-04-04 18:05:53 -07002867
Mayank Rana51958172017-02-28 14:49:21 -08002868/*
Mayank Rana25d02862017-09-12 14:49:41 -07002869 * Handle EUD based soft detach/attach event
Mayank Rana51958172017-02-28 14:49:21 -08002870 *
2871 * @nb - notifier handler
2872 * @event - event information i.e. soft detach/attach event
2873 * @ptr - extcon_dev pointer
2874 *
2875 * @return int - NOTIFY_DONE always due to EUD
2876 */
2877static int dwc3_msm_eud_notifier(struct notifier_block *nb,
2878 unsigned long event, void *ptr)
2879{
2880 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, eud_event_nb);
2881 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana51958172017-02-28 14:49:21 -08002882
2883 dbg_event(0xFF, "EUD_NB", event);
2884 dev_dbg(mdwc->dev, "eud:%ld event received\n", event);
2885 if (mdwc->vbus_active == event)
2886 return NOTIFY_DONE;
2887
Mayank Rana51958172017-02-28 14:49:21 -08002888 mdwc->vbus_active = event;
2889 if (dwc->is_drd && !mdwc->in_restart)
2890 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002891
Mayank Rana51958172017-02-28 14:49:21 -08002892 return NOTIFY_DONE;
2893}
Mayank Rana511f3b22016-08-02 12:00:11 -07002894
2895static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
2896{
2897 struct device_node *node = mdwc->dev->of_node;
2898 struct extcon_dev *edev;
2899 int ret = 0;
2900
2901 if (!of_property_read_bool(node, "extcon"))
2902 return 0;
2903
Mayank Rana51958172017-02-28 14:49:21 -08002904 /* Use first phandle (mandatory) for USB vbus status notification */
Mayank Rana511f3b22016-08-02 12:00:11 -07002905 edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
2906 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
2907 return PTR_ERR(edev);
2908
2909 if (!IS_ERR(edev)) {
2910 mdwc->extcon_vbus = edev;
2911 mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
2912 ret = extcon_register_notifier(edev, EXTCON_USB,
2913 &mdwc->vbus_nb);
2914 if (ret < 0) {
2915 dev_err(mdwc->dev, "failed to register notifier for USB\n");
2916 return ret;
2917 }
2918 }
2919
Mayank Rana51958172017-02-28 14:49:21 -08002920 /* Use second phandle (optional) for USB ID status notification */
Mayank Rana511f3b22016-08-02 12:00:11 -07002921 if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
2922 edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
2923 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2924 ret = PTR_ERR(edev);
2925 goto err;
2926 }
2927 }
2928
2929 if (!IS_ERR(edev)) {
2930 mdwc->extcon_id = edev;
2931 mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
Mayank Rana54d60432017-07-18 12:10:04 -07002932 mdwc->host_restart_nb.notifier_call =
2933 dwc3_restart_usb_host_mode;
Mayank Rana511f3b22016-08-02 12:00:11 -07002934 ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
2935 &mdwc->id_nb);
2936 if (ret < 0) {
2937 dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
2938 goto err;
2939 }
Mayank Rana54d60432017-07-18 12:10:04 -07002940
2941 ret = extcon_register_blocking_notifier(edev, EXTCON_USB_HOST,
2942 &mdwc->host_restart_nb);
2943 if (ret < 0) {
2944 dev_err(mdwc->dev, "failed to register blocking notifier\n");
2945 goto err1;
2946 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002947 }
2948
Mayank Rana81bd2e52017-07-26 16:15:15 -07002949 edev = NULL;
Mayank Rana51958172017-02-28 14:49:21 -08002950 /* Use third phandle (optional) for EUD based detach/attach events */
2951 if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
2952 edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
2953 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2954 ret = PTR_ERR(edev);
Mayank Rana54d60432017-07-18 12:10:04 -07002955 goto err1;
Mayank Rana51958172017-02-28 14:49:21 -08002956 }
2957 }
2958
Mayank Rana81bd2e52017-07-26 16:15:15 -07002959 if (!IS_ERR_OR_NULL(edev)) {
Mayank Rana51958172017-02-28 14:49:21 -08002960 mdwc->extcon_eud = edev;
2961 mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
2962 ret = extcon_register_notifier(edev, EXTCON_USB,
2963 &mdwc->eud_event_nb);
2964 if (ret < 0) {
2965 dev_err(mdwc->dev, "failed to register notifier for EUD-USB\n");
Mayank Rana54d60432017-07-18 12:10:04 -07002966 goto err2;
Mayank Rana51958172017-02-28 14:49:21 -08002967 }
2968 }
2969
Mayank Rana511f3b22016-08-02 12:00:11 -07002970 return 0;
Mayank Rana54d60432017-07-18 12:10:04 -07002971err2:
2972 if (mdwc->extcon_id)
2973 extcon_unregister_blocking_notifier(mdwc->extcon_id,
2974 EXTCON_USB_HOST, &mdwc->host_restart_nb);
Mayank Rana51958172017-02-28 14:49:21 -08002975err1:
2976 if (mdwc->extcon_id)
2977 extcon_unregister_notifier(mdwc->extcon_id, EXTCON_USB_HOST,
2978 &mdwc->id_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07002979err:
2980 if (mdwc->extcon_vbus)
2981 extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
2982 &mdwc->vbus_nb);
2983 return ret;
2984}
2985
Jack Phambbe27962017-03-23 18:42:26 -07002986#define SMMU_BASE 0x10000000 /* Device address range base */
2987#define SMMU_SIZE 0x40000000 /* Device address range size */
2988
2989static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
2990{
2991 struct device_node *node = mdwc->dev->of_node;
Jack Pham283cece2017-04-05 09:58:17 -07002992 int atomic_ctx = 1, s1_bypass;
Jack Phambbe27962017-03-23 18:42:26 -07002993 int ret;
2994
2995 if (!of_property_read_bool(node, "iommus"))
2996 return 0;
2997
2998 mdwc->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
2999 SMMU_BASE, SMMU_SIZE);
3000 if (IS_ERR_OR_NULL(mdwc->iommu_map)) {
3001 ret = PTR_ERR(mdwc->iommu_map) ?: -ENODEV;
3002 dev_err(mdwc->dev, "Failed to create IOMMU mapping (%d)\n",
3003 ret);
3004 return ret;
3005 }
3006 dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
3007
3008 ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
3009 &atomic_ctx);
3010 if (ret) {
3011 dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
3012 ret);
Jack Pham9faa51df2017-04-03 18:13:40 -07003013 goto release_mapping;
Jack Phambbe27962017-03-23 18:42:26 -07003014 }
3015
Jack Pham283cece2017-04-05 09:58:17 -07003016 s1_bypass = of_property_read_bool(node, "qcom,smmu-s1-bypass");
3017 ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
3018 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
3019 if (ret) {
3020 dev_err(mdwc->dev, "IOMMU set s1 bypass (%d) failed (%d)\n",
3021 s1_bypass, ret);
3022 goto release_mapping;
3023 }
3024
Jack Pham9faa51df2017-04-03 18:13:40 -07003025 ret = arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map);
3026 if (ret) {
3027 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n", ret);
3028 goto release_mapping;
3029 }
3030 dev_dbg(mdwc->dev, "attached to IOMMU\n");
3031
Jack Phambbe27962017-03-23 18:42:26 -07003032 return 0;
Jack Pham9faa51df2017-04-03 18:13:40 -07003033
3034release_mapping:
3035 arm_iommu_release_mapping(mdwc->iommu_map);
3036 mdwc->iommu_map = NULL;
3037 return ret;
Jack Phambbe27962017-03-23 18:42:26 -07003038}
3039
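/*
 * "mode" sysfs attribute: reads back none/peripheral/host and lets a role
 * be forced for test purposes, e.g. (sysfs path is platform dependent):
 *   echo host > /sys/devices/platform/.../dwc3_msm/mode
 * Any other value clears both the ID and VBUS inputs ("none").
 */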
Mayank Rana511f3b22016-08-02 12:00:11 -07003040static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
3041 char *buf)
3042{
3043 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3044
3045 if (mdwc->vbus_active)
3046 return snprintf(buf, PAGE_SIZE, "peripheral\n");
3047 if (mdwc->id_state == DWC3_ID_GROUND)
3048 return snprintf(buf, PAGE_SIZE, "host\n");
3049
3050 return snprintf(buf, PAGE_SIZE, "none\n");
3051}
3052
3053static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
3054 const char *buf, size_t count)
3055{
3056 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3057
3058 if (sysfs_streq(buf, "peripheral")) {
3059 mdwc->vbus_active = true;
3060 mdwc->id_state = DWC3_ID_FLOAT;
3061 } else if (sysfs_streq(buf, "host")) {
3062 mdwc->vbus_active = false;
3063 mdwc->id_state = DWC3_ID_GROUND;
3064 } else {
3065 mdwc->vbus_active = false;
3066 mdwc->id_state = DWC3_ID_FLOAT;
3067 }
3068
3069 dwc3_ext_event_notify(mdwc);
3070
3071 return count;
3072}
3073
3074static DEVICE_ATTR_RW(mode);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303075static void msm_dwc3_perf_vote_work(struct work_struct *w);
Mayank Rana511f3b22016-08-02 12:00:11 -07003076
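/*
 * "speed" sysfs attribute: caps the maximum supported speed ("high" or
 * "super") and restarts USB so the new limit takes effect.
 */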
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003077static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
3078 char *buf)
3079{
3080 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3081 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3082
3083 return snprintf(buf, PAGE_SIZE, "%s\n",
3084 usb_speed_string(dwc->max_hw_supp_speed));
3085}
3086
3087static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
3088 const char *buf, size_t count)
3089{
3090 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3091 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3092 enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
3093
3094 if (sysfs_streq(buf, "high"))
3095 req_speed = USB_SPEED_HIGH;
3096 else if (sysfs_streq(buf, "super"))
3097 req_speed = USB_SPEED_SUPER;
3098
3099 if (req_speed != USB_SPEED_UNKNOWN &&
3100 req_speed != dwc->max_hw_supp_speed) {
3101 dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
3102 schedule_work(&mdwc->restart_usb_work);
3103 }
3104
3105 return count;
3106}
3107static DEVICE_ATTR_RW(speed);
3108
Mayank Rana511f3b22016-08-02 12:00:11 -07003109static int dwc3_msm_probe(struct platform_device *pdev)
3110{
3111 struct device_node *node = pdev->dev.of_node, *dwc3_node;
3112 struct device *dev = &pdev->dev;
Hemant Kumar8220a982017-01-19 18:11:34 -08003113 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003114 struct dwc3_msm *mdwc;
3115 struct dwc3 *dwc;
3116 struct resource *res;
3117 void __iomem *tcsr;
3118 bool host_mode;
Mayank Ranad339abe2017-05-31 09:19:49 -07003119 int ret = 0, i;
Mayank Rana511f3b22016-08-02 12:00:11 -07003120 int ext_hub_reset_gpio;
3121 u32 val;
Mayank Ranad339abe2017-05-31 09:19:49 -07003122 unsigned long irq_type;
Mayank Rana511f3b22016-08-02 12:00:11 -07003123
3124 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
3125 if (!mdwc)
3126 return -ENOMEM;
3127
3128 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
3129 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
3130 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
3131 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
3132 return -EOPNOTSUPP;
3133 }
3134 }
3135
3136 platform_set_drvdata(pdev, mdwc);
3137 mdwc->dev = &pdev->dev;
3138
3139 INIT_LIST_HEAD(&mdwc->req_complete_list);
3140 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
3141 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07003142 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003143 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303144 INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
Hemant Kumar006fae42017-07-12 18:11:25 -07003145 INIT_DELAYED_WORK(&mdwc->sdp_check, check_for_sdp_connection);
Mayank Rana511f3b22016-08-02 12:00:11 -07003146
3147 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
3148 if (!mdwc->dwc3_wq) {
3149 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
3150 return -ENOMEM;
3151 }
3152
3153 /* Get all clks and gdsc reference */
3154 ret = dwc3_msm_get_clk_gdsc(mdwc);
3155 if (ret) {
3156 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
Ziqi Chen0ea81162017-08-04 18:17:55 +08003157 goto err;
Mayank Rana511f3b22016-08-02 12:00:11 -07003158 }
3159
3160 mdwc->id_state = DWC3_ID_FLOAT;
3161 set_bit(ID, &mdwc->inputs);
3162
3163 mdwc->charging_disabled = of_property_read_bool(node,
3164 "qcom,charging-disabled");
3165
3166 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
3167 &mdwc->lpm_to_suspend_delay);
3168 if (ret) {
3169 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
3170 mdwc->lpm_to_suspend_delay = 0;
3171 }
3172
Mayank Ranad339abe2017-05-31 09:19:49 -07003173 memcpy(mdwc->wakeup_irq, usb_irq_info, sizeof(usb_irq_info));
3174 for (i = 0; i < USB_MAX_IRQ; i++) {
3175 irq_type = IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME |
3176 IRQF_ONESHOT;
3177 mdwc->wakeup_irq[i].irq = platform_get_irq_byname(pdev,
3178 mdwc->wakeup_irq[i].name);
3179 if (mdwc->wakeup_irq[i].irq < 0) {
3180 /* pwr_event_irq is the only mandatory irq */
3181 if (!strcmp(mdwc->wakeup_irq[i].name,
3182 "pwr_event_irq")) {
3183 dev_err(&pdev->dev, "get_irq for %s failed\n",
3184 mdwc->wakeup_irq[i].name);
3185 ret = -EINVAL;
3186 goto err;
3187 }
3188 mdwc->wakeup_irq[i].irq = 0;
3189 } else {
3190 irq_set_status_flags(mdwc->wakeup_irq[i].irq,
3191 IRQ_NOAUTOEN);
3192 /* ss_phy_irq is level trigger interrupt */
3193 if (!strcmp(mdwc->wakeup_irq[i].name, "ss_phy_irq"))
3194 irq_type = IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
3195 IRQ_TYPE_LEVEL_HIGH | IRQF_EARLY_RESUME;
Mayank Rana511f3b22016-08-02 12:00:11 -07003196
Mayank Ranad339abe2017-05-31 09:19:49 -07003197 ret = devm_request_threaded_irq(&pdev->dev,
3198 mdwc->wakeup_irq[i].irq,
Mayank Rana511f3b22016-08-02 12:00:11 -07003199 msm_dwc3_pwr_irq,
3200 msm_dwc3_pwr_irq_thread,
Mayank Ranad339abe2017-05-31 09:19:49 -07003201 irq_type,
3202 mdwc->wakeup_irq[i].name, mdwc);
3203 if (ret) {
3204 dev_err(&pdev->dev, "irq req %s failed: %d\n",
3205 mdwc->wakeup_irq[i].name, ret);
3206 goto err;
3207 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003208 }
3209 }
3210
3211 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
3212 if (!res) {
3213 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
3214 } else {
3215 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
3216 resource_size(res));
3217 if (IS_ERR_OR_NULL(tcsr)) {
3218 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
3219 } else {
3220 /* Enable USB3 on the primary USB port. */
3221 writel_relaxed(0x1, tcsr);
3222 /*
3223 * Ensure that TCSR write is completed before
3224 * USB registers initialization.
3225 */
3226 mb();
3227 }
3228 }
3229
3230 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
3231 if (!res) {
3232 dev_err(&pdev->dev, "missing memory base resource\n");
3233 ret = -ENODEV;
3234 goto err;
3235 }
3236
3237 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
3238 resource_size(res));
3239 if (!mdwc->base) {
3240 dev_err(&pdev->dev, "ioremap failed\n");
3241 ret = -ENODEV;
3242 goto err;
3243 }
3244
3245 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3246 "ahb2phy_base");
3247 if (res) {
3248 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
3249 res->start, resource_size(res));
3250 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
3251 dev_err(dev, "couldn't map ahb2phy_base addr.\n");
3252 mdwc->ahb2phy_base = NULL;
3253 } else {
3254 /*
3255 * On some targets cfg_ahb_clk depends upon the usb gdsc
3256 * regulator. If cfg_ahb_clk is enabled without first
3257 * turning on the usb gdsc regulator, the clock gets stuck off.
3258 */
3259 dwc3_msm_config_gdsc(mdwc, 1);
3260 clk_prepare_enable(mdwc->cfg_ahb_clk);
3261 /* Configure AHB2PHY for one wait state read/write */
3262 val = readl_relaxed(mdwc->ahb2phy_base +
3263 PERIPH_SS_AHB2PHY_TOP_CFG);
3264 if (val != ONE_READ_WRITE_WAIT) {
3265 writel_relaxed(ONE_READ_WRITE_WAIT,
3266 mdwc->ahb2phy_base +
3267 PERIPH_SS_AHB2PHY_TOP_CFG);
3268 /* complete above write before using USB PHY */
3269 mb();
3270 }
3271 clk_disable_unprepare(mdwc->cfg_ahb_clk);
3272 dwc3_msm_config_gdsc(mdwc, 0);
3273 }
3274 }
3275
3276 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
3277 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
3278 if (IS_ERR(mdwc->dbm)) {
3279 dev_err(&pdev->dev, "unable to get dbm device\n");
3280 ret = -EPROBE_DEFER;
3281 goto err;
3282 }
3283 /*
3284 * Add power event if the dbm indicates coming out of L1
3285 * by interrupt
3286 */
3287 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
Mayank Ranad339abe2017-05-31 09:19:49 -07003288 if (!mdwc->wakeup_irq[PWR_EVNT_IRQ].irq) {
Mayank Rana511f3b22016-08-02 12:00:11 -07003289 dev_err(&pdev->dev,
3290 "need pwr_event_irq exiting L1\n");
3291 ret = -EINVAL;
3292 goto err;
3293 }
3294 }
3295 }
3296
3297 ext_hub_reset_gpio = of_get_named_gpio(node,
3298 "qcom,ext-hub-reset-gpio", 0);
3299
3300 if (gpio_is_valid(ext_hub_reset_gpio)
3301 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
3302 "qcom,ext-hub-reset-gpio"))) {
3303 /* reset external hub */
3304 gpio_direction_output(ext_hub_reset_gpio, 1);
3305 /*
3306 * Hub reset should be asserted for a minimum of 5
3307 * microseconds before deasserting.
3308 */
3309 usleep_range(5, 1000);
3310 gpio_direction_output(ext_hub_reset_gpio, 0);
3311 }
3312
3313 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
3314 &mdwc->tx_fifo_size))
3315 dev_err(&pdev->dev,
3316 "unable to read platform data tx fifo size\n");
3317
3318 mdwc->disable_host_mode_pm = of_property_read_bool(node,
3319 "qcom,disable-host-mode-pm");
Mayank Ranad339abe2017-05-31 09:19:49 -07003320 mdwc->use_pdc_interrupts = of_property_read_bool(node,
3321 "qcom,use-pdc-interrupts");
Mayank Rana511f3b22016-08-02 12:00:11 -07003322 dwc3_set_notifier(&dwc3_msm_notify_event);
3323
Jack Phambbe27962017-03-23 18:42:26 -07003324 ret = dwc3_msm_init_iommu(mdwc);
3325 if (ret)
3326 goto err;
3327
Mayank Rana511f3b22016-08-02 12:00:11 -07003328 /* Assumes dwc3 is the first DT child of dwc3-msm */
3329 dwc3_node = of_get_next_available_child(node, NULL);
3330 if (!dwc3_node) {
3331 dev_err(&pdev->dev, "failed to find dwc3 child\n");
3332 ret = -ENODEV;
Jack Phambbe27962017-03-23 18:42:26 -07003333 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003334 }
3335
3336 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3337 if (ret) {
3338 dev_err(&pdev->dev,
3339 "failed to create dwc3 core\n");
3340 of_node_put(dwc3_node);
Jack Phambbe27962017-03-23 18:42:26 -07003341 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003342 }
3343
3344 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
3345 of_node_put(dwc3_node);
3346 if (!mdwc->dwc3) {
3347 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
		ret = -ENODEV;
3348 goto put_dwc3;
3349 }
3350
3351 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3352 "usb-phy", 0);
3353 if (IS_ERR(mdwc->hs_phy)) {
3354 dev_err(&pdev->dev, "unable to get hsphy device\n");
3355 ret = PTR_ERR(mdwc->hs_phy);
3356 goto put_dwc3;
3357 }
3358 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3359 "usb-phy", 1);
3360 if (IS_ERR(mdwc->ss_phy)) {
3361 dev_err(&pdev->dev, "unable to get ssphy device\n");
3362 ret = PTR_ERR(mdwc->ss_phy);
3363 goto put_dwc3;
3364 }
3365
3366 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3367 if (mdwc->bus_scale_table) {
3368 mdwc->bus_perf_client =
3369 msm_bus_scale_register_client(mdwc->bus_scale_table);
3370 }
3371
3372 dwc = platform_get_drvdata(mdwc->dwc3);
3373 if (!dwc) {
3374 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
		ret = -ENODEV;
3375 goto put_dwc3;
3376 }
3377
3378 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
3379 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
3380
3381 if (cpu_to_affin)
3382 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3383
Mayank Ranaf4918d32016-12-15 13:35:55 -08003384 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
3385 &mdwc->num_gsi_event_buffers);
3386
Jack Pham9faa51df2017-04-03 18:13:40 -07003387 /* IOMMU will be reattached upon each resume/connect */
3388 if (mdwc->iommu_map)
3389 arm_iommu_detach_device(mdwc->dev);
3390
Mayank Rana511f3b22016-08-02 12:00:11 -07003391 /*
3392 * Clocks and regulators will not be turned on until the first time
3393 * runtime PM resume is called. This is to allow for booting up with
3394 * charger already connected so as not to disturb PHY line states.
3395 */
3396 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
3397 atomic_set(&dwc->in_lpm, 1);
Mayank Rana511f3b22016-08-02 12:00:11 -07003398 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
3399 pm_runtime_use_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003400 device_init_wakeup(mdwc->dev, 1);
3401
3402 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
3403 pm_runtime_get_noresume(mdwc->dev);
3404
3405 ret = dwc3_msm_extcon_register(mdwc);
3406 if (ret)
3407 goto put_dwc3;
3408
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303409 ret = of_property_read_u32(node, "qcom,pm-qos-latency",
3410 &mdwc->pm_qos_latency);
3411 if (ret) {
3412 dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
3413 mdwc->pm_qos_latency = 0;
3414 }
3415
Hemant Kumar8220a982017-01-19 18:11:34 -08003416 mdwc->usb_psy = power_supply_get_by_name("usb");
3417 if (!mdwc->usb_psy) {
3418 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3419 pval.intval = -EINVAL;
3420 } else {
3421 power_supply_get_property(mdwc->usb_psy,
3422 POWER_SUPPLY_PROP_PRESENT, &pval);
3423 }
3424
Mayank Rana511f3b22016-08-02 12:00:11 -07003425 /* Update initial VBUS/ID state from extcon */
Jack Pham4e9dff72017-04-04 18:05:53 -07003426 if (mdwc->extcon_vbus && extcon_get_state(mdwc->extcon_vbus,
Mayank Rana511f3b22016-08-02 12:00:11 -07003427 EXTCON_USB))
3428 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
Jack Pham4e9dff72017-04-04 18:05:53 -07003429 else if (mdwc->extcon_id && extcon_get_state(mdwc->extcon_id,
Mayank Rana511f3b22016-08-02 12:00:11 -07003430 EXTCON_USB_HOST))
3431 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
Hemant Kumar8220a982017-01-19 18:11:34 -08003432 else if (!pval.intval) {
3433 /* USB cable is not connected */
3434 schedule_delayed_work(&mdwc->sm_work, 0);
3435 } else {
3436 if (pval.intval > 0)
3437 dev_info(mdwc->dev, "charger detection in progress\n");
3438 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003439
3440 device_create_file(&pdev->dev, &dev_attr_mode);
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003441 device_create_file(&pdev->dev, &dev_attr_speed);
Mayank Rana511f3b22016-08-02 12:00:11 -07003442
Mayank Rana511f3b22016-08-02 12:00:11 -07003443 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3444 if (!dwc->is_drd && host_mode) {
3445 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3446 mdwc->id_state = DWC3_ID_GROUND;
3447 dwc3_ext_event_notify(mdwc);
3448 }
3449
3450 return 0;
3451
3452put_dwc3:
Mayank Rana511f3b22016-08-02 12:00:11 -07003453 if (mdwc->bus_perf_client)
3454 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
Ziqi Chen0ea81162017-08-04 18:17:55 +08003455
Jack Phambbe27962017-03-23 18:42:26 -07003456uninit_iommu:
Jack Pham9faa51df2017-04-03 18:13:40 -07003457 if (mdwc->iommu_map) {
3458 arm_iommu_detach_device(mdwc->dev);
Jack Phambbe27962017-03-23 18:42:26 -07003459 arm_iommu_release_mapping(mdwc->iommu_map);
Jack Pham9faa51df2017-04-03 18:13:40 -07003460 }
Ziqi Chen0ea81162017-08-04 18:17:55 +08003461 of_platform_depopulate(&pdev->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003462err:
Ziqi Chen0ea81162017-08-04 18:17:55 +08003463 destroy_workqueue(mdwc->dwc3_wq);
Mayank Rana511f3b22016-08-02 12:00:11 -07003464 return ret;
3465}
3466
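/*
 * Remove: undo probe in reverse order - force the hardware out of LPM so
 * the clocks can be turned off, flush the state machine work, disable the
 * wakeup IRQs, and release clocks, GDSC and the IOMMU mapping.
 */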
Mayank Rana511f3b22016-08-02 12:00:11 -07003467static int dwc3_msm_remove(struct platform_device *pdev)
3468{
3469 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
Mayank Rana08e41922017-03-02 15:25:48 -08003470 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003471 int ret_pm;
3472
3473 device_remove_file(&pdev->dev, &dev_attr_mode);
3474
3475 if (cpu_to_affin)
3476 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3477
3478 /*
3479 * In case of system suspend, pm_runtime_get_sync fails.
3480 * Hence turn ON the clocks manually.
3481 */
3482 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003483 dbg_event(0xFF, "Remov gsyn", ret_pm);
Mayank Rana511f3b22016-08-02 12:00:11 -07003484 if (ret_pm < 0) {
3485 dev_err(mdwc->dev,
3486 "pm_runtime_get_sync failed with %d\n", ret_pm);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05303487 if (mdwc->noc_aggr_clk)
3488 clk_prepare_enable(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07003489 clk_prepare_enable(mdwc->utmi_clk);
3490 clk_prepare_enable(mdwc->core_clk);
3491 clk_prepare_enable(mdwc->iface_clk);
3492 clk_prepare_enable(mdwc->sleep_clk);
3493 if (mdwc->bus_aggr_clk)
3494 clk_prepare_enable(mdwc->bus_aggr_clk);
3495 clk_prepare_enable(mdwc->xo_clk);
3496 }
3497
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303498 cancel_delayed_work_sync(&mdwc->perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003499 cancel_delayed_work_sync(&mdwc->sm_work);
3500
3501 if (mdwc->hs_phy)
3502 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
Ziqi Chen0ea81162017-08-04 18:17:55 +08003503 of_platform_depopulate(&pdev->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003504
Mayank Rana08e41922017-03-02 15:25:48 -08003505 dbg_event(0xFF, "Remov put", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003506 pm_runtime_disable(mdwc->dev);
3507 pm_runtime_barrier(mdwc->dev);
3508 pm_runtime_put_sync(mdwc->dev);
3509 pm_runtime_set_suspended(mdwc->dev);
3510 device_wakeup_disable(mdwc->dev);
3511
3512 if (mdwc->bus_perf_client)
3513 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3514
3515 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
3516 regulator_disable(mdwc->vbus_reg);
3517
Mayank Ranad339abe2017-05-31 09:19:49 -07003518 if (mdwc->wakeup_irq[HS_PHY_IRQ].irq)
3519 disable_irq(mdwc->wakeup_irq[HS_PHY_IRQ].irq);
3520 if (mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq)
3521 disable_irq(mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq);
3522 if (mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq)
3523 disable_irq(mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq);
3524 if (mdwc->wakeup_irq[SS_PHY_IRQ].irq)
3525 disable_irq(mdwc->wakeup_irq[SS_PHY_IRQ].irq);
3526 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07003527
3528 clk_disable_unprepare(mdwc->utmi_clk);
3529 clk_set_rate(mdwc->core_clk, 19200000);
3530 clk_disable_unprepare(mdwc->core_clk);
3531 clk_disable_unprepare(mdwc->iface_clk);
3532 clk_disable_unprepare(mdwc->sleep_clk);
3533 clk_disable_unprepare(mdwc->xo_clk);
3534 clk_put(mdwc->xo_clk);
3535
3536 dwc3_msm_config_gdsc(mdwc, 0);
3537
Jack Phambbe27962017-03-23 18:42:26 -07003538 if (mdwc->iommu_map) {
3539 if (!atomic_read(&dwc->in_lpm))
3540 arm_iommu_detach_device(mdwc->dev);
3541 arm_iommu_release_mapping(mdwc->iommu_map);
3542 }
3543
Mayank Rana511f3b22016-08-02 12:00:11 -07003544 return 0;
3545}
3546
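/*
 * USB core notifier used in host mode: on connect/disconnect of a device
 * attached directly to the root hub it scales the core clock depending on
 * whether the SS root-hub port is in use and reports the configuration's
 * bMaxPower to the USB power supply so the boost regulator can be tuned.
 */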
Jack Pham4d4e9342016-12-07 19:25:02 -08003547static int dwc3_msm_host_notifier(struct notifier_block *nb,
3548 unsigned long event, void *ptr)
3549{
3550 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
3551 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3552 struct usb_device *udev = ptr;
3553 union power_supply_propval pval;
3554 unsigned int max_power;
3555
3556 if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
3557 return NOTIFY_DONE;
3558
3559 if (!mdwc->usb_psy) {
3560 mdwc->usb_psy = power_supply_get_by_name("usb");
3561 if (!mdwc->usb_psy)
3562 return NOTIFY_DONE;
3563 }
3564
3565 /*
3566 * For direct-attach devices, new udev is direct child of root hub
3567 * i.e. dwc -> xhci -> root_hub -> udev
3568 * root_hub's udev->parent==NULL, so traverse struct device hierarchy
3569 */
3570 if (udev->parent && !udev->parent->parent &&
3571 udev->dev.parent->parent == &dwc->xhci->dev) {
3572 if (event == USB_DEVICE_ADD && udev->actconfig) {
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003573 if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
3574 /*
3575 * Core clock rate can be reduced only if root
3576 * hub SS port is not enabled/connected.
3577 */
3578 clk_set_rate(mdwc->core_clk,
3579 mdwc->core_clk_rate_hs);
3580 dev_dbg(mdwc->dev,
3581 "set hs core clk rate %ld\n",
3582 mdwc->core_clk_rate_hs);
3583 mdwc->max_rh_port_speed = USB_SPEED_HIGH;
3584 } else {
3585 mdwc->max_rh_port_speed = USB_SPEED_SUPER;
3586 }
3587
Jack Pham4d4e9342016-12-07 19:25:02 -08003588 if (udev->speed >= USB_SPEED_SUPER)
3589 max_power = udev->actconfig->desc.bMaxPower * 8;
3590 else
3591 max_power = udev->actconfig->desc.bMaxPower * 2;
3592 dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
3593 dev_name(&udev->dev), max_power);
3594
3595 /* inform PMIC of max power so it can optimize boost */
3596 pval.intval = max_power * 1000;
3597 power_supply_set_property(mdwc->usb_psy,
3598 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
3599 } else {
3600 pval.intval = 0;
3601 power_supply_set_property(mdwc->usb_psy,
3602 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
Hemant Kumar6f504dc2017-02-07 14:13:58 -08003603
3604 /* set rate back to default core clk rate */
3605 clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
3606 dev_dbg(mdwc->dev, "set core clk rate %ld\n",
3607 mdwc->core_clk_rate);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003608 mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
Jack Pham4d4e9342016-12-07 19:25:02 -08003609 }
3610 }
3611
3612 return NOTIFY_DONE;
3613}
3614
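/*
 * Apply or drop the PM QoS CPU DMA latency vote. The vote only changes when
 * the requested mode differs from the cached one and a non-zero
 * qcom,pm-qos-latency value was provided in DT.
 */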
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303615static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
3616{
3617 static bool curr_perf_mode;
3618 int latency = mdwc->pm_qos_latency;
3619
3620 if ((curr_perf_mode == perf_mode) || !latency)
3621 return;
3622
3623 if (perf_mode)
3624 pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
3625 else
3626 pm_qos_update_request(&mdwc->pm_qos_req_dma,
3627 PM_QOS_DEFAULT_VALUE);
3628
3629 curr_perf_mode = perf_mode;
3630 pr_debug("%s: latency updated to: %d\n", __func__,
3631 perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
3632}
3633
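/*
 * Periodic (PM_QOS_SAMPLE_SEC) worker: compare the controller interrupt
 * count for the last sample window against PM_QOS_THRESHOLD and vote for
 * low latency only while interrupt traffic is high.
 */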
3634static void msm_dwc3_perf_vote_work(struct work_struct *w)
3635{
3636 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
3637 perf_vote_work.work);
3638 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3639 static unsigned long last_irq_cnt;
3640 bool in_perf_mode = false;
3641
3642 if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD)
3643 in_perf_mode = true;
3644
3645 pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
3646 __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt));
3647
3648 last_irq_cnt = dwc->irq_cnt;
3649 msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
3650 schedule_delayed_work(&mdwc->perf_vote_work,
3651 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
3652}
3653
Mayank Rana511f3b22016-08-02 12:00:11 -07003654#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
3655
3656/**
3657 * dwc3_otg_start_host - helper function for starting/stopping the host
3658 * controller driver.
3659 *
3660 * @mdwc: Pointer to the dwc3_msm structure.
3661 * @on: start / stop the host controller driver.
3662 *
3663 * Returns 0 on success otherwise negative errno.
3664 */
3665static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
3666{
3667 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3668 int ret = 0;
3669
Mayank Rana511f3b22016-08-02 12:00:11 -07003670 /*
3671 * The vbus_reg pointer could have multiple values
3672 * NULL: regulator_get() hasn't been called, or was previously deferred
3673 * IS_ERR: regulator could not be obtained, so skip using it
3674 * Valid pointer otherwise
3675 */
3676 if (!mdwc->vbus_reg) {
3677 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
3678 "vbus_dwc3");
3679 if (IS_ERR(mdwc->vbus_reg) &&
3680 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
3681 /* regulators may not be ready, so retry again later */
3682 mdwc->vbus_reg = NULL;
3683 return -EPROBE_DEFER;
3684 }
3685 }
3686
3687 if (on) {
3688 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
3689
Mayank Rana511f3b22016-08-02 12:00:11 -07003690 mdwc->hs_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003691 if (dwc->maximum_speed == USB_SPEED_SUPER) {
Hemant Kumarde1df692016-04-26 19:36:48 -07003692 mdwc->ss_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003693 usb_phy_notify_connect(mdwc->ss_phy,
3694 USB_SPEED_SUPER);
3695 }
Hemant Kumarde1df692016-04-26 19:36:48 -07003696
Mayank Rana0d5efd72017-06-08 10:06:00 -07003697 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
Hemant Kumarde1df692016-04-26 19:36:48 -07003698 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003699 dbg_event(0xFF, "StrtHost gync",
3700 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003701 if (!IS_ERR(mdwc->vbus_reg))
3702 ret = regulator_enable(mdwc->vbus_reg);
3703 if (ret) {
3704 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
3705 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3706 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3707 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003708 dbg_event(0xFF, "vregerr psync",
3709 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003710 return ret;
3711 }
3712
3713 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
3714
Jack Pham4d4e9342016-12-07 19:25:02 -08003715 mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
3716 usb_register_notify(&mdwc->host_nb);
3717
Manu Gautam976fdfc2016-08-18 09:27:35 +05303718 mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
3719 usb_register_atomic_notify(&mdwc->usbdev_nb);
Mayank Ranaa75caa52017-10-10 11:45:13 -07003720 ret = dwc3_host_init(dwc);
Mayank Rana511f3b22016-08-02 12:00:11 -07003721 if (ret) {
3722 dev_err(mdwc->dev,
3723 "%s: failed to add XHCI pdev ret=%d\n",
3724 __func__, ret);
3725 if (!IS_ERR(mdwc->vbus_reg))
3726 regulator_disable(mdwc->vbus_reg);
3727 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3728 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3729 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003730 dbg_event(0xFF, "pdeverr psync",
3731 atomic_read(&mdwc->dev->power.usage_count));
Jack Pham4d4e9342016-12-07 19:25:02 -08003732 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003733 return ret;
3734 }
3735
3736 /*
3737 * In some cases it is observed that USB PHY is not going into
3738 * suspend with host mode suspend functionality. Hence disable
3739 * XHCI's runtime PM here if disable_host_mode_pm is set.
3740 */
3741 if (mdwc->disable_host_mode_pm)
3742 pm_runtime_disable(&dwc->xhci->dev);
3743
3744 mdwc->in_host_mode = true;
3745 dwc3_usb3_phy_suspend(dwc, true);
3746
3747 /* xHCI should have incremented child count as necessary */
Mayank Rana08e41922017-03-02 15:25:48 -08003748 dbg_event(0xFF, "StrtHost psync",
3749 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003750 pm_runtime_mark_last_busy(mdwc->dev);
3751 pm_runtime_put_sync_autosuspend(mdwc->dev);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303752#ifdef CONFIG_SMP
3753 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3754 mdwc->pm_qos_req_dma.irq = dwc->irq;
3755#endif
3756 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3757 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3758 /* start in perf mode for better performance initially */
3759 msm_dwc3_perf_vote_update(mdwc, true);
3760 schedule_delayed_work(&mdwc->perf_vote_work,
3761 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003762 } else {
3763 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3764
Manu Gautam976fdfc2016-08-18 09:27:35 +05303765 usb_unregister_atomic_notify(&mdwc->usbdev_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003766 if (!IS_ERR(mdwc->vbus_reg))
3767 ret = regulator_disable(mdwc->vbus_reg);
3768 if (ret) {
3769 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3770 return ret;
3771 }
3772
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303773 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3774 msm_dwc3_perf_vote_update(mdwc, false);
3775 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3776
Mayank Rana511f3b22016-08-02 12:00:11 -07003777 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003778 dbg_event(0xFF, "StopHost gsync",
3779 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003780 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
Mayank Rana0d5efd72017-06-08 10:06:00 -07003781 if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
3782 usb_phy_notify_disconnect(mdwc->ss_phy,
3783 USB_SPEED_SUPER);
3784 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3785 }
3786
Mayank Rana511f3b22016-08-02 12:00:11 -07003787 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
Mayank Ranaa75caa52017-10-10 11:45:13 -07003788 dwc3_host_exit(dwc);
Jack Pham4d4e9342016-12-07 19:25:02 -08003789 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003790
Mayank Rana511f3b22016-08-02 12:00:11 -07003791 dwc3_usb3_phy_suspend(dwc, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07003792 mdwc->in_host_mode = false;
3793
Mayank Rana511f3b22016-08-02 12:00:11 -07003794 pm_runtime_mark_last_busy(mdwc->dev);
3795 pm_runtime_put_sync_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003796 dbg_event(0xFF, "StopHost psync",
3797 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003798 }
3799
3800 return 0;
3801}
3802
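/*
 * Propagate the VBUS-valid indication to the controller by overriding the
 * HS PHY session-valid signal and, when SuperSpeed is supported, the SS
 * lane power-present signal.
 */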
3803static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3804{
3805 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3806
3807 /* Update OTG VBUS Valid from HSPHY to controller */
3808 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3809 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3810 UTMI_OTG_VBUS_VALID,
3811 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3812
3813 /* Update only if Super Speed is supported */
3814 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3815 /* Update VBUS Valid from SSPHY to controller */
3816 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3817 LANE0_PWR_PRESENT,
3818 vbus_present ? LANE0_PWR_PRESENT : 0);
3819 }
3820}
3821
3822/**
3823 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3824 *
3825 * @mdwc: Pointer to the dwc3_msm structure.
3826 * @on: Turn ON/OFF the gadget.
3827 *
3828 * Returns 0 on success otherwise negative errno.
3829 */
3830static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3831{
3832 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3833
3834 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003835 dbg_event(0xFF, "StrtGdgt gsync",
3836 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003837
3838 if (on) {
3839 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3840 __func__, dwc->gadget.name);
3841
3842 dwc3_override_vbus_status(mdwc, true);
3843 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3844 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3845
3846 /*
3847 * Core reset is not required during start peripheral. Only
3848 * DBM reset is required, hence perform only DBM reset here.
3849 */
3850 dwc3_msm_block_reset(mdwc, false);
3851
3852 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3853 usb_gadget_vbus_connect(&dwc->gadget);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303854#ifdef CONFIG_SMP
3855 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3856 mdwc->pm_qos_req_dma.irq = dwc->irq;
3857#endif
3858 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3859 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3860 /* start in perf mode for better performance initially */
3861 msm_dwc3_perf_vote_update(mdwc, true);
3862 schedule_delayed_work(&mdwc->perf_vote_work,
3863 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003864 } else {
3865 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3866 __func__, dwc->gadget.name);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303867 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3868 msm_dwc3_perf_vote_update(mdwc, false);
3869 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3870
Mayank Rana511f3b22016-08-02 12:00:11 -07003871 usb_gadget_vbus_disconnect(&dwc->gadget);
3872 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3873 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3874 dwc3_override_vbus_status(mdwc, false);
3875 dwc3_usb3_phy_suspend(dwc, false);
3876 }
3877
3878 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003879 dbg_event(0xFF, "StopGdgt psync",
3880 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003881
3882 return 0;
3883}
3884
Mayank Rana54d60432017-07-18 12:10:04 -07003885/* speed: 0 - USB_SPEED_HIGH, 1 - USB_SPEED_SUPER */
3886static int dwc3_restart_usb_host_mode(struct notifier_block *nb,
3887 unsigned long event, void *ptr)
3888{
3889 struct dwc3_msm *mdwc;
3890 struct dwc3 *dwc;
3891 int ret = -EINVAL, usb_speed;
3892
3893 mdwc = container_of(nb, struct dwc3_msm, host_restart_nb);
3894 dwc = platform_get_drvdata(mdwc->dwc3);
3895
3896 usb_speed = (event == 0 ? USB_SPEED_HIGH : USB_SPEED_SUPER);
3897 if (dwc->maximum_speed == usb_speed)
3898 goto err;
3899
Mayank Rana8a5cba82017-10-27 15:12:54 -07003900 dbg_event(0xFF, "fw_restarthost", 0);
3901 flush_delayed_work(&mdwc->sm_work);
Mayank Rana54d60432017-07-18 12:10:04 -07003902 dbg_event(0xFF, "stop_host_mode", dwc->maximum_speed);
3903 ret = dwc3_otg_start_host(mdwc, 0);
3904 if (ret)
3905 goto err;
3906
3907 /*
3908 * Stopping host mode triggers autosuspend of the mdwc device, and it
3909 * may take some time before PM runtime suspend is actually invoked.
3910 * Hence call the pm_runtime_suspend() API directly to put the USB
3911 * controller and PHYs into suspend immediately.
3912 */
3913 ret = pm_runtime_suspend(mdwc->dev);
3914 dbg_event(0xFF, "pm_runtime_sus", ret);
3915
3916 dwc->maximum_speed = usb_speed;
3917 mdwc->otg_state = OTG_STATE_B_IDLE;
3918 schedule_delayed_work(&mdwc->sm_work, 0);
3919 dbg_event(0xFF, "complete_host_change", dwc->maximum_speed);
3920err:
3921 return ret;
3922}
3923
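/*
 * Return the charger type reported by the "usb" power supply via
 * POWER_SUPPLY_PROP_REAL_TYPE, or a negative errno if charging is disabled
 * or the power supply is not available.
 */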
Hemant Kumar006fae42017-07-12 18:11:25 -07003924static int get_psy_type(struct dwc3_msm *mdwc)
Mayank Rana511f3b22016-08-02 12:00:11 -07003925{
Jack Pham8caff352016-08-19 16:33:55 -07003926 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003927
3928 if (mdwc->charging_disabled)
Hemant Kumar006fae42017-07-12 18:11:25 -07003929 return -EINVAL;
Mayank Rana511f3b22016-08-02 12:00:11 -07003930
3931 if (!mdwc->usb_psy) {
3932 mdwc->usb_psy = power_supply_get_by_name("usb");
3933 if (!mdwc->usb_psy) {
Hemant Kumar006fae42017-07-12 18:11:25 -07003934 dev_err(mdwc->dev, "Could not get usb psy\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07003935 return -ENODEV;
3936 }
3937 }
3938
Hemant Kumar006fae42017-07-12 18:11:25 -07003939 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_REAL_TYPE,
3940 &pval);
3941
3942 return pval.intval;
3943}
3944
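/*
 * Report the current (in mA) available from the host to the USB power
 * supply. Floating chargers are flagged with -ETIMEDOUT; for any charger
 * type other than a standard downstream port the request is ignored.
 */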
3945static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3946{
3947 union power_supply_propval pval = {0};
3948 int ret, psy_type;
3949
Hemant Kumar006fae42017-07-12 18:11:25 -07003950 psy_type = get_psy_type(mdwc);
Vijayavardhan Vennapusac7f9b0f2017-10-03 14:44:52 +05303951 if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
Hemant Kumard6bae052017-07-27 15:11:25 -07003952 pval.intval = -ETIMEDOUT;
Vijayavardhan Vennapusac7f9b0f2017-10-03 14:44:52 +05303953 goto set_prop;
Hemant Kumard6bae052017-07-27 15:11:25 -07003954 }
Jack Pham8caff352016-08-19 16:33:55 -07003955
Vijayavardhan Vennapusac7f9b0f2017-10-03 14:44:52 +05303956 if (mdwc->max_power == mA || psy_type != POWER_SUPPLY_TYPE_USB)
3957 return 0;
3958
3959 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3960 /* Set max current limit in uA */
3961 pval.intval = 1000 * mA;
3962
3963set_prop:
Jack Phamd72bafe2016-08-09 11:07:22 -07003964 ret = power_supply_set_property(mdwc->usb_psy,
Nicholas Troast7f55c922017-07-25 13:18:03 -07003965 POWER_SUPPLY_PROP_SDP_CURRENT_MAX, &pval);
Jack Phamd72bafe2016-08-09 11:07:22 -07003966 if (ret) {
3967 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3968 return ret;
3969 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003970
3971 mdwc->max_power = mA;
3972 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003973}
3974
3975
3976/**
3977 * dwc3_otg_sm_work - workqueue function.
3978 *
3979 * @w: Pointer to the dwc3 otg workqueue
3980 *
3981 * NOTE: After any change in otg_state, we must reschedule the state machine.
3982 */
3983static void dwc3_otg_sm_work(struct work_struct *w)
3984{
3985 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
3986 struct dwc3 *dwc = NULL;
3987 bool work = 0;
3988 int ret = 0;
3989 unsigned long delay = 0;
3990 const char *state;
3991
3992 if (mdwc->dwc3)
3993 dwc = platform_get_drvdata(mdwc->dwc3);
3994
3995 if (!dwc) {
3996 dev_err(mdwc->dev, "dwc is NULL.\n");
3997 return;
3998 }
3999
4000 state = usb_otg_state_string(mdwc->otg_state);
4001 dev_dbg(mdwc->dev, "%s state\n", state);
Mayank Rana08e41922017-03-02 15:25:48 -08004002 dbg_event(0xFF, state, 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004003
4004 /* Check OTG state */
4005 switch (mdwc->otg_state) {
4006 case OTG_STATE_UNDEFINED:
Hemant Kumar8220a982017-01-19 18:11:34 -08004007 /* put controller and phy in suspend if no cable connected */
Mayank Rana511f3b22016-08-02 12:00:11 -07004008 if (test_bit(ID, &mdwc->inputs) &&
Hemant Kumar8220a982017-01-19 18:11:34 -08004009 !test_bit(B_SESS_VLD, &mdwc->inputs)) {
4010 dbg_event(0xFF, "undef_id_!bsv", 0);
4011 pm_runtime_set_active(mdwc->dev);
4012 pm_runtime_enable(mdwc->dev);
4013 pm_runtime_get_noresume(mdwc->dev);
4014 dwc3_msm_resume(mdwc);
4015 pm_runtime_put_sync(mdwc->dev);
4016 dbg_event(0xFF, "Undef NoUSB",
4017 atomic_read(&mdwc->dev->power.usage_count));
4018 mdwc->otg_state = OTG_STATE_B_IDLE;
Mayank Rana511f3b22016-08-02 12:00:11 -07004019 break;
Hemant Kumar8220a982017-01-19 18:11:34 -08004020 }
Mayank Rana511f3b22016-08-02 12:00:11 -07004021
Mayank Rana08e41922017-03-02 15:25:48 -08004022 dbg_event(0xFF, "Exit UNDEF", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004023 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar8220a982017-01-19 18:11:34 -08004024 pm_runtime_set_suspended(mdwc->dev);
4025 pm_runtime_enable(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07004026 /* fall-through */
4027 case OTG_STATE_B_IDLE:
4028 if (!test_bit(ID, &mdwc->inputs)) {
4029 dev_dbg(mdwc->dev, "!id\n");
4030 mdwc->otg_state = OTG_STATE_A_IDLE;
4031 work = 1;
4032 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
4033 dev_dbg(mdwc->dev, "b_sess_vld\n");
Hemant Kumar006fae42017-07-12 18:11:25 -07004034 if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_FLOAT)
4035 queue_delayed_work(mdwc->dwc3_wq,
4036 &mdwc->sdp_check,
4037 msecs_to_jiffies(SDP_CONNETION_CHECK_TIME));
Mayank Rana511f3b22016-08-02 12:00:11 -07004038 /*
4039 * Increment pm usage count upon cable connect. Count
4040 * is decremented in OTG_STATE_B_PERIPHERAL state on
4041 * cable disconnect or in bus suspend.
4042 */
4043 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004044 dbg_event(0xFF, "BIDLE gsync",
4045 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004046 dwc3_otg_start_peripheral(mdwc, 1);
4047 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4048 work = 1;
4049 } else {
4050 dwc3_msm_gadget_vbus_draw(mdwc, 0);
4051 dev_dbg(mdwc->dev, "Cable disconnected\n");
4052 }
4053 break;
4054
4055 case OTG_STATE_B_PERIPHERAL:
4056 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
4057 !test_bit(ID, &mdwc->inputs)) {
4058 dev_dbg(mdwc->dev, "!id || !bsv\n");
4059 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar006fae42017-07-12 18:11:25 -07004060 cancel_delayed_work_sync(&mdwc->sdp_check);
Mayank Rana511f3b22016-08-02 12:00:11 -07004061 dwc3_otg_start_peripheral(mdwc, 0);
4062 /*
4063 * Decrement pm usage count upon cable disconnect
4064 * which was incremented upon cable connect in
4065 * OTG_STATE_B_IDLE state
4066 */
4067 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004068 dbg_event(0xFF, "!BSV psync",
4069 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004070 work = 1;
4071 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
4072 test_bit(B_SESS_VLD, &mdwc->inputs)) {
4073 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
4074 mdwc->otg_state = OTG_STATE_B_SUSPEND;
4075 /*
4076 * Decrement pm usage count upon bus suspend.
4077 * Count was incremented either upon cable
4078 * connect in OTG_STATE_B_IDLE or host
4079 * initiated resume after bus suspend in
4080 * OTG_STATE_B_SUSPEND state
4081 */
4082 pm_runtime_mark_last_busy(mdwc->dev);
4083 pm_runtime_put_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004084 dbg_event(0xFF, "SUSP put",
4085 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004086 }
4087 break;
4088
4089 case OTG_STATE_B_SUSPEND:
4090 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
4091 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
4092 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar006fae42017-07-12 18:11:25 -07004093 cancel_delayed_work_sync(&mdwc->sdp_check);
Mayank Rana511f3b22016-08-02 12:00:11 -07004094 dwc3_otg_start_peripheral(mdwc, 0);
4095 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
4096 dev_dbg(mdwc->dev, "BSUSP !susp\n");
4097 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4098 /*
4099 * Increment pm usage count upon host
4100 * initiated resume. Count was decremented
4101 * upon bus suspend in
4102 * OTG_STATE_B_PERIPHERAL state.
4103 */
4104 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004105 dbg_event(0xFF, "!SUSP gsync",
4106 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004107 }
4108 break;
4109
4110 case OTG_STATE_A_IDLE:
4111 /* Switch to A-Device*/
4112 if (test_bit(ID, &mdwc->inputs)) {
4113 dev_dbg(mdwc->dev, "id\n");
4114 mdwc->otg_state = OTG_STATE_B_IDLE;
4115 mdwc->vbus_retry_count = 0;
4116 work = 1;
4117 } else {
4118 mdwc->otg_state = OTG_STATE_A_HOST;
4119 ret = dwc3_otg_start_host(mdwc, 1);
4120 if ((ret == -EPROBE_DEFER) &&
4121 mdwc->vbus_retry_count < 3) {
4122 /*
4123 * Get regulator failed as regulator driver is
4124 * not up yet. Will try to start host after 1sec
4125 */
4126 mdwc->otg_state = OTG_STATE_A_IDLE;
4127 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
4128 delay = VBUS_REG_CHECK_DELAY;
4129 work = 1;
4130 mdwc->vbus_retry_count++;
4131 } else if (ret) {
4132 dev_err(mdwc->dev, "unable to start host\n");
4133 mdwc->otg_state = OTG_STATE_A_IDLE;
4134 goto ret;
4135 }
4136 }
4137 break;
4138
4139 case OTG_STATE_A_HOST:
Manu Gautam976fdfc2016-08-18 09:27:35 +05304140 if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
4141 dev_dbg(mdwc->dev, "id || hc_died\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07004142 dwc3_otg_start_host(mdwc, 0);
4143 mdwc->otg_state = OTG_STATE_B_IDLE;
4144 mdwc->vbus_retry_count = 0;
Manu Gautam976fdfc2016-08-18 09:27:35 +05304145 mdwc->hc_died = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07004146 work = 1;
4147 } else {
4148 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004149 dbg_event(0xFF, "XHCIResume", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004150 if (dwc)
4151 pm_runtime_resume(&dwc->xhci->dev);
4152 }
4153 break;
4154
4155 default:
4156 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
4157
4158 }
4159
4160 if (work)
4161 schedule_delayed_work(&mdwc->sm_work, delay);
4162
4163ret:
4164 return;
4165}
4166
4167#ifdef CONFIG_PM_SLEEP
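/*
 * System sleep handlers: suspend is refused with -EBUSY unless the
 * controller is already in low power mode; resume only queues resume_work
 * so the actual wakeup happens in the OTG state machine context.
 */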
4168static int dwc3_msm_pm_suspend(struct device *dev)
4169{
4170 int ret = 0;
4171 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4172 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4173
4174 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004175 dbg_event(0xFF, "PM Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004176
4177 flush_workqueue(mdwc->dwc3_wq);
4178 if (!atomic_read(&dwc->in_lpm)) {
4179 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
4180 return -EBUSY;
4181 }
4182
4183 ret = dwc3_msm_suspend(mdwc);
4184 if (!ret)
4185 atomic_set(&mdwc->pm_suspended, 1);
4186
4187 return ret;
4188}
4189
4190static int dwc3_msm_pm_resume(struct device *dev)
4191{
4192 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004193 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004194
4195 dev_dbg(dev, "dwc3-msm PM resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004196 dbg_event(0xFF, "PM Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004197
Mayank Rana511f3b22016-08-02 12:00:11 -07004198 /* flush to avoid race in read/write of pm_suspended */
4199 flush_workqueue(mdwc->dwc3_wq);
4200 atomic_set(&mdwc->pm_suspended, 0);
4201
4202 /* kick in otg state machine */
4203 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
4204
4205 return 0;
4206}
4207#endif
4208
4209#ifdef CONFIG_PM
4210static int dwc3_msm_runtime_idle(struct device *dev)
4211{
Mayank Rana08e41922017-03-02 15:25:48 -08004212 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4213 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4214
Mayank Rana511f3b22016-08-02 12:00:11 -07004215 dev_dbg(dev, "DWC3-msm runtime idle\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004216 dbg_event(0xFF, "RT Idle", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004217
4218 return 0;
4219}
4220
4221static int dwc3_msm_runtime_suspend(struct device *dev)
4222{
4223 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004224 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004225
4226 dev_dbg(dev, "DWC3-msm runtime suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004227 dbg_event(0xFF, "RT Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004228
4229 return dwc3_msm_suspend(mdwc);
4230}
4231
4232static int dwc3_msm_runtime_resume(struct device *dev)
4233{
4234 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004235 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004236
4237 dev_dbg(dev, "DWC3-msm runtime resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004238 dbg_event(0xFF, "RT Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004239
4240 return dwc3_msm_resume(mdwc);
4241}
4242#endif
4243
4244static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
4245 SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
4246 SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
4247 dwc3_msm_runtime_idle)
4248};
4249
4250static const struct of_device_id of_dwc3_match[] = {
4251 {
4252 .compatible = "qcom,dwc-usb3-msm",
4253 },
4254 { },
4255};
4256MODULE_DEVICE_TABLE(of, of_dwc3_match);
4257
4258static struct platform_driver dwc3_msm_driver = {
4259 .probe = dwc3_msm_probe,
4260 .remove = dwc3_msm_remove,
4261 .driver = {
4262 .name = "msm-dwc3",
4263 .pm = &dwc3_msm_dev_pm_ops,
4264 .of_match_table = of_dwc3_match,
4265 },
4266};
4267
4268MODULE_LICENSE("GPL v2");
4269MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
4270
4271static int dwc3_msm_init(void)
4272{
4273 return platform_driver_register(&dwc3_msm_driver);
4274}
4275module_init(dwc3_msm_init);
4276
4277static void __exit dwc3_msm_exit(void)
4278{
4279 platform_driver_unregister(&dwc3_msm_driver);
4280}
4281module_exit(dwc3_msm_exit);