Hemant Kumar8e4c2f22017-01-24 18:13:07 -08001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Mayank Rana511f3b22016-08-02 12:00:11 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
Jack Phambbe27962017-03-23 18:42:26 -070024#include <asm/dma-iommu.h>
25#include <linux/iommu.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070026#include <linux/ioport.h>
27#include <linux/clk.h>
28#include <linux/io.h>
29#include <linux/module.h>
30#include <linux/types.h>
31#include <linux/delay.h>
32#include <linux/of.h>
33#include <linux/of_platform.h>
34#include <linux/of_gpio.h>
35#include <linux/list.h>
36#include <linux/uaccess.h>
37#include <linux/usb/ch9.h>
38#include <linux/usb/gadget.h>
39#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070040#include <linux/regulator/consumer.h>
41#include <linux/pm_wakeup.h>
42#include <linux/power_supply.h>
43#include <linux/cdev.h>
44#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070045#include <linux/msm-bus.h>
46#include <linux/irq.h>
47#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053048#include <linux/reset.h>
Hemant Kumar633dc332016-08-10 13:41:05 -070049#include <linux/clk/qcom.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070050
51#include "power.h"
52#include "core.h"
53#include "gadget.h"
54#include "dbm.h"
55#include "debug.h"
56#include "xhci.h"
57
Hemant Kumar006fae42017-07-12 18:11:25 -070058#define SDP_CONNETION_CHECK_TIME 10000 /* in ms */
59
Mayank Rana511f3b22016-08-02 12:00:11 -070060/* timeout to wait for USB cable status notification (in ms) */
61#define SM_INIT_TIMEOUT 30000
62
63/* AHB2PHY register offsets */
64#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
65
 66/* AHB2PHY read/write wait value */
67#define ONE_READ_WRITE_WAIT 0x11
68
 69/* cpu to which the usb interrupt is pinned */
70static int cpu_to_affin;
71module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
 72MODULE_PARM_DESC(cpu_to_affin, "affine usb irq to this cpu");
73
Mayank Ranaf70d8212017-06-12 14:02:07 -070074/* override for USB speed */
75static int override_usb_speed;
76module_param(override_usb_speed, int, 0644);
77MODULE_PARM_DESC(override_usb_speed, "override for USB speed");
78
Mayank Rana511f3b22016-08-02 12:00:11 -070079/* XHCI registers */
80#define USB3_HCSPARAMS1 (0x4)
81#define USB3_PORTSC (0x420)
82
83/**
84 * USB QSCRATCH Hardware registers
85 *
86 */
87#define QSCRATCH_REG_OFFSET (0x000F8800)
88#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
89#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
90#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
91#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
92
93#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
94#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
95#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
96#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
97#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
98
99/* QSCRATCH_GENERAL_CFG register bit offset */
100#define PIPE_UTMI_CLK_SEL BIT(0)
101#define PIPE3_PHYSTATUS_SW BIT(3)
102#define PIPE_UTMI_CLK_DIS BIT(8)
103
104#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
105#define UTMI_OTG_VBUS_VALID BIT(20)
106#define SW_SESSVLD_SEL BIT(28)
107
108#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
109#define LANE0_PWR_PRESENT BIT(24)
110
111/* GSI related registers */
112#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
113#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
114
115#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
116#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
117#define GSI_CLK_EN_MASK BIT(12)
118#define BLOCK_GSI_WR_GO_MASK BIT(1)
119#define GSI_EN_MASK BIT(0)
120
121#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
122#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
123#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
124#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
125
126#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
127#define GSI_WR_CTRL_STATE_MASK BIT(15)
128
Mayank Ranaf4918d32016-12-15 13:35:55 -0800129#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
130#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n) (n << 22)
131#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
132#define DWC3_GEVENT_TYPE_GSI 0x3
133
Mayank Rana511f3b22016-08-02 12:00:11 -0700134struct dwc3_msm_req_complete {
135 struct list_head list_item;
136 struct usb_request *req;
137 void (*orig_complete)(struct usb_ep *ep,
138 struct usb_request *req);
139};
140
141enum dwc3_id_state {
142 DWC3_ID_GROUND = 0,
143 DWC3_ID_FLOAT,
144};
145
146/* for type c cable */
147enum plug_orientation {
148 ORIENTATION_NONE,
149 ORIENTATION_CC1,
150 ORIENTATION_CC2,
151};
152
Mayank Ranad339abe2017-05-31 09:19:49 -0700153enum msm_usb_irq {
154 HS_PHY_IRQ,
155 PWR_EVNT_IRQ,
156 DP_HS_PHY_IRQ,
157 DM_HS_PHY_IRQ,
158 SS_PHY_IRQ,
159 USB_MAX_IRQ
160};
161
162struct usb_irq {
163 char *name;
164 int irq;
165 bool enable;
166};
167
168static const struct usb_irq usb_irq_info[USB_MAX_IRQ] = {
169 {"hs_phy_irq", 0},
170 {"pwr_event_irq", 0},
171 {"dp_hs_phy_irq", 0},
172 {"dm_hs_phy_irq", 0},
173 {"ss_phy_irq", 0},
174};
175
Mayank Rana511f3b22016-08-02 12:00:11 -0700176/* Input bits to state machine (mdwc->inputs) */
177
178#define ID 0
179#define B_SESS_VLD 1
180#define B_SUSPEND 2
181
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530182#define PM_QOS_SAMPLE_SEC 2
183#define PM_QOS_THRESHOLD 400
184
Mayank Rana511f3b22016-08-02 12:00:11 -0700185struct dwc3_msm {
186 struct device *dev;
187 void __iomem *base;
188 void __iomem *ahb2phy_base;
189 struct platform_device *dwc3;
Jack Phambbe27962017-03-23 18:42:26 -0700190 struct dma_iommu_mapping *iommu_map;
Mayank Rana511f3b22016-08-02 12:00:11 -0700191 const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
192 struct list_head req_complete_list;
193 struct clk *xo_clk;
194 struct clk *core_clk;
195 long core_clk_rate;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800196 long core_clk_rate_hs;
Mayank Rana511f3b22016-08-02 12:00:11 -0700197 struct clk *iface_clk;
198 struct clk *sleep_clk;
199 struct clk *utmi_clk;
200 unsigned int utmi_clk_rate;
201 struct clk *utmi_clk_src;
202 struct clk *bus_aggr_clk;
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +0530203 struct clk *noc_aggr_clk;
Mayank Rana511f3b22016-08-02 12:00:11 -0700204 struct clk *cfg_ahb_clk;
Amit Nischal4d278212016-06-06 17:54:34 +0530205 struct reset_control *core_reset;
Mayank Rana511f3b22016-08-02 12:00:11 -0700206 struct regulator *dwc3_gdsc;
207
208 struct usb_phy *hs_phy, *ss_phy;
209
210 struct dbm *dbm;
211
212 /* VBUS regulator for host mode */
213 struct regulator *vbus_reg;
214 int vbus_retry_count;
215 bool resume_pending;
216 atomic_t pm_suspended;
Mayank Ranad339abe2017-05-31 09:19:49 -0700217 struct usb_irq wakeup_irq[USB_MAX_IRQ];
Mayank Rana511f3b22016-08-02 12:00:11 -0700218 struct work_struct resume_work;
219 struct work_struct restart_usb_work;
220 bool in_restart;
221 struct workqueue_struct *dwc3_wq;
222 struct delayed_work sm_work;
223 unsigned long inputs;
224 unsigned int max_power;
225 bool charging_disabled;
226 enum usb_otg_state otg_state;
Mayank Rana511f3b22016-08-02 12:00:11 -0700227 u32 bus_perf_client;
228 struct msm_bus_scale_pdata *bus_scale_table;
229 struct power_supply *usb_psy;
Jack Pham4b8b4ae2016-08-09 11:36:34 -0700230 struct work_struct vbus_draw_work;
Mayank Rana511f3b22016-08-02 12:00:11 -0700231 bool in_host_mode;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800232 enum usb_device_speed max_rh_port_speed;
Mayank Rana511f3b22016-08-02 12:00:11 -0700233 unsigned int tx_fifo_size;
234 bool vbus_active;
235 bool suspend;
236 bool disable_host_mode_pm;
Mayank Ranad339abe2017-05-31 09:19:49 -0700237 bool use_pdc_interrupts;
Mayank Rana511f3b22016-08-02 12:00:11 -0700238 enum dwc3_id_state id_state;
239 unsigned long lpm_flags;
240#define MDWC3_SS_PHY_SUSPEND BIT(0)
241#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY BIT(1)
242#define MDWC3_POWER_COLLAPSE BIT(2)
243
244 unsigned int irq_to_affin;
245 struct notifier_block dwc3_cpu_notifier;
Manu Gautam976fdfc2016-08-18 09:27:35 +0530246 struct notifier_block usbdev_nb;
247 bool hc_died;
Mayank Rana511f3b22016-08-02 12:00:11 -0700248
249 struct extcon_dev *extcon_vbus;
250 struct extcon_dev *extcon_id;
Mayank Rana51958172017-02-28 14:49:21 -0800251 struct extcon_dev *extcon_eud;
Mayank Rana511f3b22016-08-02 12:00:11 -0700252 struct notifier_block vbus_nb;
253 struct notifier_block id_nb;
Mayank Rana51958172017-02-28 14:49:21 -0800254 struct notifier_block eud_event_nb;
Mayank Rana54d60432017-07-18 12:10:04 -0700255 struct notifier_block host_restart_nb;
Mayank Rana511f3b22016-08-02 12:00:11 -0700256
Jack Pham4d4e9342016-12-07 19:25:02 -0800257 struct notifier_block host_nb;
258
Mayank Rana511f3b22016-08-02 12:00:11 -0700259 atomic_t in_p3;
260 unsigned int lpm_to_suspend_delay;
261 bool init;
262 enum plug_orientation typec_orientation;
Mayank Ranaf4918d32016-12-15 13:35:55 -0800263 u32 num_gsi_event_buffers;
264 struct dwc3_event_buffer **gsi_ev_buff;
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530265 int pm_qos_latency;
266 struct pm_qos_request pm_qos_req_dma;
267 struct delayed_work perf_vote_work;
Hemant Kumar006fae42017-07-12 18:11:25 -0700268 struct delayed_work sdp_check;
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +0530269 struct mutex suspend_resume_mutex;
Mayank Rana511f3b22016-08-02 12:00:11 -0700270};
271
272#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
273#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
274#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
275
276#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
277#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
278#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
279
280#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
281#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
282#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
283
284#define DSTS_CONNECTSPD_SS 0x4
285
286
287static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
288static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
Mayank Ranaf4918d32016-12-15 13:35:55 -0800289static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
Mayank Rana54d60432017-07-18 12:10:04 -0700290static int dwc3_restart_usb_host_mode(struct notifier_block *nb,
291 unsigned long event, void *ptr);
Mayank Ranaf70d8212017-06-12 14:02:07 -0700292
293static inline bool is_valid_usb_speed(struct dwc3 *dwc, int speed)
294{
295
296 return (((speed == USB_SPEED_FULL) || (speed == USB_SPEED_HIGH) ||
297 (speed == USB_SPEED_SUPER) || (speed == USB_SPEED_SUPER_PLUS))
298 && (speed <= dwc->maximum_speed));
299}
300
Mayank Rana511f3b22016-08-02 12:00:11 -0700301/**
302 *
303 * Read register with debug info.
304 *
305 * @base - DWC3 base virtual address.
306 * @offset - register offset.
307 *
308 * @return u32
309 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700310static inline u32 dwc3_msm_read_reg(void __iomem *base, u32 offset)
Mayank Rana511f3b22016-08-02 12:00:11 -0700311{
312 u32 val = ioread32(base + offset);
313 return val;
314}
315
316/**
317 * Read register masked field with debug info.
318 *
319 * @base - DWC3 base virtual address.
320 * @offset - register offset.
321 * @mask - register bitmask.
322 *
323 * @return u32
324 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700325static inline u32 dwc3_msm_read_reg_field(void __iomem *base,
Mayank Rana511f3b22016-08-02 12:00:11 -0700326 u32 offset,
327 const u32 mask)
328{
Mayank Ranad796cab2017-07-11 15:34:12 -0700329 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700330 u32 val = ioread32(base + offset);
331
332 val &= mask; /* clear other bits */
333 val >>= shift;
334 return val;
335}
336
337/**
338 *
339 * Write register with debug info.
340 *
341 * @base - DWC3 base virtual address.
342 * @offset - register offset.
343 * @val - value to write.
344 *
345 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700346static inline void dwc3_msm_write_reg(void __iomem *base, u32 offset, u32 val)
Mayank Rana511f3b22016-08-02 12:00:11 -0700347{
348 iowrite32(val, base + offset);
349}
350
351/**
352 * Write register masked field with debug info.
353 *
354 * @base - DWC3 base virtual address.
355 * @offset - register offset.
356 * @mask - register bitmask.
357 * @val - value to write.
358 *
359 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700360static inline void dwc3_msm_write_reg_field(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700361 const u32 mask, u32 val)
362{
Mayank Ranad796cab2017-07-11 15:34:12 -0700363 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700364 u32 tmp = ioread32(base + offset);
365
366 tmp &= ~mask; /* clear written bits */
367 val = tmp | (val << shift);
368 iowrite32(val, base + offset);
369}
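
/*
 * For example, gsi_enable() below calls
 *	dwc3_msm_write_reg_field(mdwc->base, GSI_GENERAL_CFG_REG,
 *				 GSI_CLK_EN_MASK, 1);
 * With GSI_CLK_EN_MASK = BIT(12), __ffs(mask) = 12, so the value 1 is
 * shifted into bit 12 and written back with every other bit of
 * GSI_GENERAL_CFG_REG preserved.
 */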
370
371/**
372 * Write register and read back masked value to confirm it is written
373 *
374 * @base - DWC3 base virtual address.
375 * @offset - register offset.
376 * @mask - register bitmask specifying what should be updated
377 * @val - value to write.
378 *
379 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700380static inline void dwc3_msm_write_readback(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700381 const u32 mask, u32 val)
382{
383 u32 write_val, tmp = ioread32(base + offset);
384
385 tmp &= ~mask; /* retain other bits */
386 write_val = tmp | val;
387
388 iowrite32(write_val, base + offset);
389
390 /* Read back to see if val was written */
391 tmp = ioread32(base + offset);
392 tmp &= mask; /* clear other bits */
393
394 if (tmp != val)
395 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
396 __func__, val, offset);
397}
398
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800399static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
400{
401 int i, num_ports;
402 u32 reg;
403
404 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
405 num_ports = HCS_MAX_PORTS(reg);
406
407 for (i = 0; i < num_ports; i++) {
408 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
409 if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
410 return true;
411 }
412
413 return false;
414}
415
Mayank Rana511f3b22016-08-02 12:00:11 -0700416static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
417{
418 int i, num_ports;
419 u32 reg;
420
421 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
422 num_ports = HCS_MAX_PORTS(reg);
423
424 for (i = 0; i < num_ports; i++) {
425 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
426 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
427 return true;
428 }
429
430 return false;
431}
432
433static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
434{
435 u8 speed;
436
437 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
438 return !!(speed & DSTS_CONNECTSPD_SS);
439}
440
441static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
442{
443 if (mdwc->in_host_mode)
444 return dwc3_msm_is_host_superspeed(mdwc);
445
446 return dwc3_msm_is_dev_superspeed(mdwc);
447}
448
449#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
450/**
451 * Configure the DBM with the BAM's data fifo.
452 * This function is called by the USB BAM Driver
453 * upon initialization.
454 *
455 * @ep - pointer to usb endpoint.
456 * @addr - address of data fifo.
457 * @size - size of data fifo.
458 *
459 */
460int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
461 u32 size, u8 dst_pipe_idx)
462{
463 struct dwc3_ep *dep = to_dwc3_ep(ep);
464 struct dwc3 *dwc = dep->dwc;
465 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
466
467 dev_dbg(mdwc->dev, "%s\n", __func__);
468
469 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
470 dst_pipe_idx);
471}
472
473
474/**
 475* Cleanup for the msm endpoint on request completion.
 476*
 477* Also calls the original request completion handler.
478*
479* @usb_ep - pointer to usb_ep instance.
480* @request - pointer to usb_request instance.
481*
483*/
484static void dwc3_msm_req_complete_func(struct usb_ep *ep,
485 struct usb_request *request)
486{
487 struct dwc3_ep *dep = to_dwc3_ep(ep);
488 struct dwc3 *dwc = dep->dwc;
489 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
490 struct dwc3_msm_req_complete *req_complete = NULL;
491
492 /* Find original request complete function and remove it from list */
493 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
494 if (req_complete->req == request)
495 break;
496 }
497 if (!req_complete || req_complete->req != request) {
498 dev_err(dep->dwc->dev, "%s: could not find the request\n",
499 __func__);
500 return;
501 }
502 list_del(&req_complete->list_item);
503
504 /*
 505	 * Release one more TRB to the pool since the DBM queue took 2 TRBs
 506	 * (normal and link), while dwc3/gadget.c :: dwc3_gadget_giveback
 507	 * released only one.
508 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700509 dep->trb_dequeue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700510
511 /* Unconfigure dbm ep */
512 dbm_ep_unconfig(mdwc->dbm, dep->number);
513
514 /*
 515	 * If this is the last endpoint we unconfigured, then also reset
516 * the event buffers; unless unconfiguring the ep due to lpm,
517 * in which case the event buffer only gets reset during the
518 * block reset.
519 */
520 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
521 !dbm_reset_ep_after_lpm(mdwc->dbm))
522 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
523
524 /*
525 * Call original complete function, notice that dwc->lock is already
526 * taken by the caller of this function (dwc3_gadget_giveback()).
527 */
528 request->complete = req_complete->orig_complete;
529 if (request->complete)
530 request->complete(ep, request);
531
532 kfree(req_complete);
533}
534
535
536/**
537* Helper function
538*
539* Reset DBM endpoint.
540*
541* @mdwc - pointer to dwc3_msm instance.
542* @dep - pointer to dwc3_ep instance.
543*
544* @return int - 0 on success, negative on error.
545*/
546static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
547{
548 int ret;
549
550 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
551
552 /* Reset the dbm endpoint */
553 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
554 if (ret) {
555 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
556 __func__);
557 return ret;
558 }
559
560 /*
561 * The necessary delay between asserting and deasserting the dbm ep
562 * reset is based on the number of active endpoints. If there is more
563 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
564 * delay will suffice.
565 */
566 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
567 usleep_range(1000, 1200);
568 else
569 udelay(10);
570 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
571 if (ret) {
572 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
573 __func__);
574 return ret;
575 }
576
577 return 0;
578}
579
580/**
581* Reset the DBM endpoint which is linked to the given USB endpoint.
582*
583* @usb_ep - pointer to usb_ep instance.
584*
585* @return int - 0 on success, negative on error.
586*/
587
588int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
589{
590 struct dwc3_ep *dep = to_dwc3_ep(ep);
591 struct dwc3 *dwc = dep->dwc;
592 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
593
594 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
595}
596EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
597
598
599/**
600* Helper function.
601* See the header of the dwc3_msm_ep_queue function.
602*
603* @dwc3_ep - pointer to dwc3_ep instance.
604* @req - pointer to dwc3_request instance.
605*
606* @return int - 0 on success, negative on error.
607*/
608static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
609{
610 struct dwc3_trb *trb;
611 struct dwc3_trb *trb_link;
612 struct dwc3_gadget_ep_cmd_params params;
613 u32 cmd;
614 int ret = 0;
615
Mayank Rana83ad5822016-08-09 14:17:22 -0700616	/* We push the request to the dep->started_list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700617	 * this request is issued with start transfer. The request will be
 618	 * removed from this list in two cases. The first is that the transfer
 619	 * completes (not if the transfer is endless, using a circular TRB
 620	 * ring with a link TRB). The second case is a stop transfer, which
 621	 * can be initiated by the function driver when calling dequeue.
622 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700623 req->started = true;
624 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700625
 626	/* First, prepare a normal TRB pointing to the fake buffer */
Mayank Rana9ca186c2017-06-19 17:57:21 -0700627 trb = &dep->trb_pool[dep->trb_enqueue];
628 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700629 memset(trb, 0, sizeof(*trb));
630
631 req->trb = trb;
632 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
633 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
634 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
635 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
636 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
637
 638	/* Second, prepare a Link TRB that points to the first TRB */
Mayank Rana9ca186c2017-06-19 17:57:21 -0700639 trb_link = &dep->trb_pool[dep->trb_enqueue];
640 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700641 memset(trb_link, 0, sizeof(*trb_link));
642
643 trb_link->bpl = lower_32_bits(req->trb_dma);
644 trb_link->bph = DBM_TRB_BIT |
645 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
646 trb_link->size = 0;
647 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
648
649 /*
650 * Now start the transfer
651 */
652 memset(&params, 0, sizeof(params));
653 params.param0 = 0; /* TDAddr High */
654 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
655
656 /* DBM requires IOC to be set */
657 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700658 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700659 if (ret < 0) {
660 dev_dbg(dep->dwc->dev,
661 "%s: failed to send STARTTRANSFER command\n",
662 __func__);
663
664 list_del(&req->list);
665 return ret;
666 }
667 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700668 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700669
670 return ret;
671}
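
/*
 * Resulting TRB layout for a DBM request (two TRBs per request):
 *
 *	trb_pool[enq]	: NORMAL TRB, HWO | CHN set, bph carries the DBM
 *			  bits and the endpoint number, buffer is the
 *			  "fake" one (bpl left at 0).
 *	trb_pool[enq+1]	: LINK TRB, HWO set, bpl points back at the
 *			  NORMAL TRB.
 *
 * Because two TRBs are consumed here while dwc3_gadget_giveback() only
 * releases one, dwc3_msm_req_complete_func() above advances trb_dequeue
 * once more on completion.
 */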
672
673/**
674* Queue a usb request to the DBM endpoint.
 675* This function should be called after the endpoint
 676* has been enabled by ep_enable.
 677*
 678* This function prepares a special structure of TRBs which
 679* the DBM HW understands, so it is possible to use
 680* this endpoint in DBM mode.
 681*
 682* The TRBs prepared by this function are one normal TRB
 683* which points to a fake buffer, followed by a link TRB
 684* that points to the first TRB.
 685*
 686* The API of this function follows the regular API of
 687* usb_ep_queue (see usb_ep_ops in include/linux/usb/gadget.h).
688*
689* @usb_ep - pointer to usb_ep instance.
690* @request - pointer to usb_request instance.
691* @gfp_flags - possible flags.
692*
693* @return int - 0 on success, negative on error.
694*/
695static int dwc3_msm_ep_queue(struct usb_ep *ep,
696 struct usb_request *request, gfp_t gfp_flags)
697{
698 struct dwc3_request *req = to_dwc3_request(request);
699 struct dwc3_ep *dep = to_dwc3_ep(ep);
700 struct dwc3 *dwc = dep->dwc;
701 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
702 struct dwc3_msm_req_complete *req_complete;
703 unsigned long flags;
704 int ret = 0, size;
705 u8 bam_pipe;
706 bool producer;
707 bool disable_wb;
708 bool internal_mem;
709 bool ioc;
710 bool superspeed;
711
712 if (!(request->udc_priv & MSM_SPS_MODE)) {
713 /* Not SPS mode, call original queue */
714 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
715 __func__);
716
717 return (mdwc->original_ep_ops[dep->number])->queue(ep,
718 request,
719 gfp_flags);
720 }
721
722 /* HW restriction regarding TRB size (8KB) */
723 if (req->request.length < 0x2000) {
724 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
725 return -EINVAL;
726 }
727
728 /*
729 * Override req->complete function, but before doing that,
 730	 * store its original pointer in the req_complete_list.
731 */
732 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
733 if (!req_complete)
734 return -ENOMEM;
735
736 req_complete->req = request;
737 req_complete->orig_complete = request->complete;
738 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
739 request->complete = dwc3_msm_req_complete_func;
740
741 /*
742 * Configure the DBM endpoint
743 */
744 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
745 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
746 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
747 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
748 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
749
750 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
751 disable_wb, internal_mem, ioc);
752 if (ret < 0) {
753 dev_err(mdwc->dev,
754 "error %d after calling dbm_ep_config\n", ret);
755 return ret;
756 }
757
 758	dev_vdbg(dwc->dev, "%s: queuing request %p to ep %s length %d\n",
759 __func__, request, ep->name, request->length);
760 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
761 dbm_event_buffer_config(mdwc->dbm,
762 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
763 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
764 DWC3_GEVNTSIZ_SIZE(size));
765
766 /*
 767	 * We must obtain the lock of the dwc3 core driver, including
 768	 * disabling interrupts, to make sure we are the only ones
 769	 * configuring the HW device core. Queuing the request should
 770	 * finish as soon as possible so that the lock can be released
 771	 * quickly.
772 */
773 spin_lock_irqsave(&dwc->lock, flags);
774 if (!dep->endpoint.desc) {
775 dev_err(mdwc->dev,
776 "%s: trying to queue request %p to disabled ep %s\n",
777 __func__, request, ep->name);
778 ret = -EPERM;
779 goto err;
780 }
781
782 if (dep->number == 0 || dep->number == 1) {
783 dev_err(mdwc->dev,
784 "%s: trying to queue dbm request %p to control ep %s\n",
785 __func__, request, ep->name);
786 ret = -EPERM;
787 goto err;
788 }
789
790
Mayank Rana83ad5822016-08-09 14:17:22 -0700791 if (dep->trb_dequeue != dep->trb_enqueue ||
792 !list_empty(&dep->pending_list)
793 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700794 dev_err(mdwc->dev,
 795			"%s: trying to queue dbm request %p to ep %s\n",
796 __func__, request, ep->name);
797 ret = -EPERM;
798 goto err;
799 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700800 dep->trb_dequeue = 0;
801 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700802 }
803
804 ret = __dwc3_msm_ep_queue(dep, req);
805 if (ret < 0) {
806 dev_err(mdwc->dev,
807 "error %d after calling __dwc3_msm_ep_queue\n", ret);
808 goto err;
809 }
810
811 spin_unlock_irqrestore(&dwc->lock, flags);
812 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
813 dbm_set_speed(mdwc->dbm, (u8)superspeed);
814
815 return 0;
816
817err:
818 spin_unlock_irqrestore(&dwc->lock, flags);
819 kfree(req_complete);
820 return ret;
821}
822
823/*
824* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
825*
826* @usb_ep - pointer to usb_ep instance.
827*
828* @return int - XferRscIndex
829*/
830static inline int gsi_get_xfer_index(struct usb_ep *ep)
831{
832 struct dwc3_ep *dep = to_dwc3_ep(ep);
833
834 return dep->resource_index;
835}
836
837/*
838* Fills up the GSI channel information needed in call to IPA driver
839* for GSI channel creation.
840*
841* @usb_ep - pointer to usb_ep instance.
842* @ch_info - output parameter with requested channel info
843*/
844static void gsi_get_channel_info(struct usb_ep *ep,
845 struct gsi_channel_info *ch_info)
846{
847 struct dwc3_ep *dep = to_dwc3_ep(ep);
848 int last_trb_index = 0;
849 struct dwc3 *dwc = dep->dwc;
850 struct usb_gsi_request *request = ch_info->ch_req;
851
852 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
853 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
Mayank Ranaac776d12017-04-18 16:56:13 -0700854 DWC3_DEP_BASE(dep->number) + DWC3_DEPCMD);
855
Mayank Rana511f3b22016-08-02 12:00:11 -0700856 ch_info->depcmd_hi_addr = 0;
857
858 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
859 &dep->trb_pool[0]);
 860	/* Convert to multiples of 1KB */
861 ch_info->const_buffer_size = request->buf_len/1024;
862
863 /* IN direction */
864 if (dep->direction) {
865 /*
866 * Multiply by size of each TRB for xfer_ring_len in bytes.
867 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
868 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
869 */
870 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
871 last_trb_index = 2 * request->num_bufs + 2;
872 } else { /* OUT direction */
873 /*
874 * Multiply by size of each TRB for xfer_ring_len in bytes.
 875	 * n + 2 TRBs as per GSI h/w requirement: 1 LINK TRB at the
 876	 * start, n Xfer TRBs, and 1 LINK TRB at the end.
877 */
Mayank Rana64d136b2016-11-01 21:01:34 -0700878 ch_info->xfer_ring_len = (request->num_bufs + 2) * 0x10;
879 last_trb_index = request->num_bufs + 2;
Mayank Rana511f3b22016-08-02 12:00:11 -0700880 }
881
882 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
883 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
884 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
885 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
886 DWC3_GEVNTCOUNT(ep->ep_intr_num));
887 ch_info->gevntcount_hi_addr = 0;
888
889 dev_dbg(dwc->dev,
890 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
891 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
892 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
893}
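
/*
 * Worked example: with request->num_bufs = 8 and 16-byte TRBs,
 *	IN  endpoint: xfer_ring_len = (2 * 8 + 2) * 0x10 = 288 bytes,
 *		      last_trb_index = 18 (the LINK TRB);
 *	OUT endpoint: xfer_ring_len = (8 + 2) * 0x10 = 160 bytes,
 *		      last_trb_index = 10.
 * last_trb_addr keeps only the low 16 bits of that LINK TRB's DMA
 * address, as the GSI hardware requires.
 */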
894
895/*
896* Perform StartXfer on GSI EP. Stores XferRscIndex.
897*
898* @usb_ep - pointer to usb_ep instance.
899*
900* @return int - 0 on success
901*/
902static int gsi_startxfer_for_ep(struct usb_ep *ep)
903{
904 int ret;
905 struct dwc3_gadget_ep_cmd_params params;
906 u32 cmd;
907 struct dwc3_ep *dep = to_dwc3_ep(ep);
908 struct dwc3 *dwc = dep->dwc;
909
910 memset(&params, 0, sizeof(params));
911 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
912 params.param0 |= (ep->ep_intr_num << 16);
913 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
914 &dep->trb_pool[0]));
915 cmd = DWC3_DEPCMD_STARTTRANSFER;
916 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700917 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700918
919 if (ret < 0)
920 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700921 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700922 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
923 return ret;
924}
925
926/*
927* Store Ring Base and Doorbell Address for GSI EP
928* for GSI channel creation.
929*
930* @usb_ep - pointer to usb_ep instance.
931* @dbl_addr - Doorbell address obtained from IPA driver
932*/
933static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
934{
935 struct dwc3_ep *dep = to_dwc3_ep(ep);
936 struct dwc3 *dwc = dep->dwc;
937 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
938 int n = ep->ep_intr_num - 1;
939
940 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
941 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
942 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
943
944 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
945 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
946 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
947 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
948}
949
950/*
Mayank Rana64d136b2016-11-01 21:01:34 -0700951* Rings Doorbell for GSI Channel
Mayank Rana511f3b22016-08-02 12:00:11 -0700952*
953* @usb_ep - pointer to usb_ep instance.
954* @request - pointer to GSI request. This is used to pass in the
955* address of the GSI doorbell obtained from IPA driver
956*/
Mayank Rana64d136b2016-11-01 21:01:34 -0700957static void gsi_ring_db(struct usb_ep *ep, struct usb_gsi_request *request)
Mayank Rana511f3b22016-08-02 12:00:11 -0700958{
959 void __iomem *gsi_dbl_address_lsb;
960 void __iomem *gsi_dbl_address_msb;
961 dma_addr_t offset;
962 u64 dbl_addr = *((u64 *)request->buf_base_addr);
963 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
964 u32 dbl_hi_addr = (dbl_addr >> 32);
Mayank Rana511f3b22016-08-02 12:00:11 -0700965 struct dwc3_ep *dep = to_dwc3_ep(ep);
966 struct dwc3 *dwc = dep->dwc;
967 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Rana64d136b2016-11-01 21:01:34 -0700968 int num_trbs = (dep->direction) ? (2 * (request->num_bufs) + 2)
969 : (request->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -0700970
971 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
972 dbl_lo_addr, sizeof(u32));
973 if (!gsi_dbl_address_lsb)
974 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
975
976 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
977 dbl_hi_addr, sizeof(u32));
978 if (!gsi_dbl_address_msb)
979 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
980
981 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
Mayank Rana64d136b2016-11-01 21:01:34 -0700982 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x) for ep:%s\n",
983 &offset, gsi_dbl_address_lsb, dbl_lo_addr, ep->name);
Mayank Rana511f3b22016-08-02 12:00:11 -0700984
985 writel_relaxed(offset, gsi_dbl_address_lsb);
986 writel_relaxed(0, gsi_dbl_address_msb);
987}
988
989/*
990* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
991*
992* @usb_ep - pointer to usb_ep instance.
993* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
994*
995* @return int - 0 on success
996*/
997static int gsi_updatexfer_for_ep(struct usb_ep *ep,
998 struct usb_gsi_request *request)
999{
1000 int i;
1001 int ret;
1002 u32 cmd;
1003 int num_trbs = request->num_bufs + 1;
1004 struct dwc3_trb *trb;
1005 struct dwc3_gadget_ep_cmd_params params;
1006 struct dwc3_ep *dep = to_dwc3_ep(ep);
1007 struct dwc3 *dwc = dep->dwc;
1008
1009 for (i = 0; i < num_trbs - 1; i++) {
1010 trb = &dep->trb_pool[i];
1011 trb->ctrl |= DWC3_TRB_CTRL_HWO;
1012 }
1013
1014 memset(&params, 0, sizeof(params));
1015 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1016 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -07001017 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001018 dep->flags |= DWC3_EP_BUSY;
1019 if (ret < 0)
1020 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
1021 return ret;
1022}
1023
1024/*
1025* Perform EndXfer on particular GSI EP.
1026*
1027* @usb_ep - pointer to usb_ep instance.
1028*/
1029static void gsi_endxfer_for_ep(struct usb_ep *ep)
1030{
1031 struct dwc3_ep *dep = to_dwc3_ep(ep);
1032 struct dwc3 *dwc = dep->dwc;
1033
1034 dwc3_stop_active_transfer(dwc, dep->number, true);
1035}
1036
1037/*
1038* Allocates and configures TRBs for GSI EPs.
1039*
1040* @usb_ep - pointer to usb_ep instance.
1041* @request - pointer to GSI request.
1042*
1043* @return int - 0 on success
1044*/
1045static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
1046{
1047 int i = 0;
1048 dma_addr_t buffer_addr = req->dma;
1049 struct dwc3_ep *dep = to_dwc3_ep(ep);
1050 struct dwc3 *dwc = dep->dwc;
1051 struct dwc3_trb *trb;
1052 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
Mayank Rana64d136b2016-11-01 21:01:34 -07001053 : (req->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -07001054
Mayank Ranae0a427e2017-09-18 16:56:26 -07001055 dep->trb_pool = dma_zalloc_coherent(dwc->sysdev,
1056 num_trbs * sizeof(struct dwc3_trb),
1057 &dep->trb_pool_dma, GFP_KERNEL);
1058
1059 if (!dep->trb_pool) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001060 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
1061 dep->name);
1062 return -ENOMEM;
1063 }
1064
1065 dep->num_trbs = num_trbs;
Mayank Rana511f3b22016-08-02 12:00:11 -07001066 /* IN direction */
1067 if (dep->direction) {
1068 for (i = 0; i < num_trbs ; i++) {
1069 trb = &dep->trb_pool[i];
1070 memset(trb, 0, sizeof(*trb));
1071 /* Set up first n+1 TRBs for ZLPs */
1072 if (i < (req->num_bufs + 1)) {
1073 trb->bpl = 0;
1074 trb->bph = 0;
1075 trb->size = 0;
1076 trb->ctrl = DWC3_TRBCTL_NORMAL
1077 | DWC3_TRB_CTRL_IOC;
1078 continue;
1079 }
1080
1081 /* Setup n TRBs pointing to valid buffers */
1082 trb->bpl = lower_32_bits(buffer_addr);
1083 trb->bph = 0;
1084 trb->size = 0;
1085 trb->ctrl = DWC3_TRBCTL_NORMAL
1086 | DWC3_TRB_CTRL_IOC;
1087 buffer_addr += req->buf_len;
1088
1089 /* Set up the Link TRB at the end */
1090 if (i == (num_trbs - 1)) {
1091 trb->bpl = dwc3_trb_dma_offset(dep,
1092 &dep->trb_pool[0]);
1093 trb->bph = (1 << 23) | (1 << 21)
1094 | (ep->ep_intr_num << 16);
1095 trb->size = 0;
1096 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1097 | DWC3_TRB_CTRL_HWO;
1098 }
1099 }
1100 } else { /* OUT direction */
1101
1102 for (i = 0; i < num_trbs ; i++) {
1103
1104 trb = &dep->trb_pool[i];
1105 memset(trb, 0, sizeof(*trb));
Mayank Rana64d136b2016-11-01 21:01:34 -07001106			/* Set up a LINK TRB at the start of the TRB ring */
1107 if (i == 0) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001108 trb->bpl = dwc3_trb_dma_offset(dep,
Mayank Rana64d136b2016-11-01 21:01:34 -07001109 &dep->trb_pool[1]);
1110 trb->ctrl = DWC3_TRBCTL_LINK_TRB;
1111 } else if (i == (num_trbs - 1)) {
1112 /* Set up the Link TRB at the end */
1113 trb->bpl = dwc3_trb_dma_offset(dep,
1114 &dep->trb_pool[0]);
Mayank Rana511f3b22016-08-02 12:00:11 -07001115 trb->bph = (1 << 23) | (1 << 21)
1116 | (ep->ep_intr_num << 16);
Mayank Rana511f3b22016-08-02 12:00:11 -07001117 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1118 | DWC3_TRB_CTRL_HWO;
Mayank Rana64d136b2016-11-01 21:01:34 -07001119 } else {
1120 trb->bpl = lower_32_bits(buffer_addr);
1121 trb->size = req->buf_len;
1122 buffer_addr += req->buf_len;
1123 trb->ctrl = DWC3_TRBCTL_NORMAL
1124 | DWC3_TRB_CTRL_IOC
1125 | DWC3_TRB_CTRL_CSP
1126 | DWC3_TRB_CTRL_ISP_IMI;
Mayank Rana511f3b22016-08-02 12:00:11 -07001127 }
1128 }
1129 }
Mayank Rana64d136b2016-11-01 21:01:34 -07001130
1131 pr_debug("%s: Initialized TRB Ring for %s\n", __func__, dep->name);
1132 trb = &dep->trb_pool[0];
1133 if (trb) {
1134 for (i = 0; i < num_trbs; i++) {
1135 pr_debug("TRB(%d): ADDRESS:%lx bpl:%x bph:%x size:%x ctrl:%x\n",
1136 i, (unsigned long)dwc3_trb_dma_offset(dep,
1137 &dep->trb_pool[i]), trb->bpl, trb->bph,
1138 trb->size, trb->ctrl);
1139 trb++;
1140 }
1141 }
1142
Mayank Rana511f3b22016-08-02 12:00:11 -07001143 return 0;
1144}
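
/*
 * Resulting ring layouts (n == req->num_bufs):
 *
 *	IN  (2n + 2 TRBs):	[0 .. n]	ZLP TRBs (zero length, IOC)
 *				[n+1 .. 2n]	data TRBs, one per buffer
 *				[2n+1]		LINK TRB back to trb_pool[0]
 *
 *	OUT (n + 2 TRBs):	[0]		LINK TRB pointing at trb_pool[1]
 *				[1 .. n]	data TRBs, one per buffer
 *				[n+1]		LINK TRB back to trb_pool[0]
 */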
1145
1146/*
1147* Frees TRBs for GSI EPs.
1148*
1149* @usb_ep - pointer to usb_ep instance.
1150*
1151*/
1152static void gsi_free_trbs(struct usb_ep *ep)
1153{
1154 struct dwc3_ep *dep = to_dwc3_ep(ep);
Mayank Ranae0a427e2017-09-18 16:56:26 -07001155 struct dwc3 *dwc = dep->dwc;
Mayank Rana511f3b22016-08-02 12:00:11 -07001156
1157 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1158 return;
1159
1160 /* Free TRBs and TRB pool for EP */
Mayank Ranae0a427e2017-09-18 16:56:26 -07001161 if (dep->trb_pool_dma) {
1162 dma_free_coherent(dwc->sysdev,
1163 dep->num_trbs * sizeof(struct dwc3_trb),
1164 dep->trb_pool,
1165 dep->trb_pool_dma);
Mayank Rana511f3b22016-08-02 12:00:11 -07001166 dep->trb_pool = NULL;
1167 dep->trb_pool_dma = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07001168 }
1169}
1170/*
1171* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1172*
1173* @usb_ep - pointer to usb_ep instance.
1174* @request - pointer to GSI request.
1175*/
1176static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
1177{
1178 struct dwc3_ep *dep = to_dwc3_ep(ep);
1179 struct dwc3 *dwc = dep->dwc;
1180 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1181 struct dwc3_gadget_ep_cmd_params params;
1182 const struct usb_endpoint_descriptor *desc = ep->desc;
1183 const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
Mayank Ranaac1200c2017-04-25 13:48:46 -07001184 u32 reg;
1185 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001186
1187 memset(&params, 0x00, sizeof(params));
1188
1189 /* Configure GSI EP */
1190 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
1191 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
1192
1193 /* Burst size is only needed in SuperSpeed mode */
1194 if (dwc->gadget.speed == USB_SPEED_SUPER) {
1195 u32 burst = dep->endpoint.maxburst - 1;
1196
1197 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
1198 }
1199
1200 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
1201 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
1202 | DWC3_DEPCFG_STREAM_EVENT_EN;
1203 dep->stream_capable = true;
1204 }
1205
1206 /* Set EP number */
1207 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
1208
1209 /* Set interrupter number for GSI endpoints */
1210 params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
1211
1212 /* Enable XferInProgress and XferComplete Interrupts */
1213 params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
1214 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
1215 params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
1216 /*
1217 * We must use the lower 16 TX FIFOs even though
1218 * HW might have more
1219 */
 1220	/* Remove FIFO Number for GSI EP */
1221 if (dep->direction)
1222 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
1223
1224 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
1225
1226 dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
1227 params.param0, params.param1, params.param2, dep->name);
1228
Mayank Rana83ad5822016-08-09 14:17:22 -07001229 dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001230
1231 /* Set XferRsc Index for GSI EP */
1232 if (!(dep->flags & DWC3_EP_ENABLED)) {
Mayank Ranaac1200c2017-04-25 13:48:46 -07001233 ret = dwc3_gadget_resize_tx_fifos(dwc, dep);
1234 if (ret)
1235 return;
1236
Mayank Rana511f3b22016-08-02 12:00:11 -07001237 memset(&params, 0x00, sizeof(params));
1238 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
Mayank Rana83ad5822016-08-09 14:17:22 -07001239 dwc3_send_gadget_ep_cmd(dep,
Mayank Rana511f3b22016-08-02 12:00:11 -07001240 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
1241
1242 dep->endpoint.desc = desc;
1243 dep->comp_desc = comp_desc;
1244 dep->type = usb_endpoint_type(desc);
1245 dep->flags |= DWC3_EP_ENABLED;
1246 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1247 reg |= DWC3_DALEPENA_EP(dep->number);
1248 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1249 }
1250
1251}
1252
1253/*
1254* Enables USB wrapper for GSI
1255*
1256* @usb_ep - pointer to usb_ep instance.
1257*/
1258static void gsi_enable(struct usb_ep *ep)
1259{
1260 struct dwc3_ep *dep = to_dwc3_ep(ep);
1261 struct dwc3 *dwc = dep->dwc;
1262 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1263
1264 dwc3_msm_write_reg_field(mdwc->base,
1265 GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
1266 dwc3_msm_write_reg_field(mdwc->base,
1267 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
1268 dwc3_msm_write_reg_field(mdwc->base,
1269 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
1270 dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
1271 dwc3_msm_write_reg_field(mdwc->base,
1272 GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
1273}
1274
1275/*
1276* Block or allow doorbell towards GSI
1277*
1278* @usb_ep - pointer to usb_ep instance.
 1279* @block_db - true to block the doorbell towards GSI,
 1280* false to allow it.
1281*/
1282static void gsi_set_clear_dbell(struct usb_ep *ep,
1283 bool block_db)
1284{
1285
1286 struct dwc3_ep *dep = to_dwc3_ep(ep);
1287 struct dwc3 *dwc = dep->dwc;
1288 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1289
1290 dwc3_msm_write_reg_field(mdwc->base,
1291 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1292}
1293
1294/*
1295* Performs necessary checks before stopping GSI channels
1296*
1297* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1298*/
1299static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
1300{
1301 u32 timeout = 1500;
1302 u32 reg = 0;
1303 struct dwc3_ep *dep = to_dwc3_ep(ep);
1304 struct dwc3 *dwc = dep->dwc;
1305 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1306
1307 while (dwc3_msm_read_reg_field(mdwc->base,
1308 GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
1309 if (!timeout--) {
1310 dev_err(mdwc->dev,
1311 "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
1312 return false;
1313 }
1314 }
1315 /* Check for U3 only if we are not handling Function Suspend */
1316 if (!f_suspend) {
1317 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1318 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
1319 dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
1320 return false;
1321 }
1322 }
1323
1324 return true;
1325}
1326
1327
1328/**
1329* Performs GSI operations or GSI EP related operations.
1330*
1331* @usb_ep - pointer to usb_ep instance.
1332* @op_data - pointer to opcode related data.
1333* @op - GSI related or GSI EP related op code.
1334*
1335* @return int - 0 on success, negative on error.
1336* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1337*/
1338static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1339 void *op_data, enum gsi_ep_op op)
1340{
1341 u32 ret = 0;
1342 struct dwc3_ep *dep = to_dwc3_ep(ep);
1343 struct dwc3 *dwc = dep->dwc;
1344 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1345 struct usb_gsi_request *request;
1346 struct gsi_channel_info *ch_info;
1347 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001348 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001349
1350 switch (op) {
1351 case GSI_EP_OP_PREPARE_TRBS:
1352 request = (struct usb_gsi_request *)op_data;
1353 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1354 ret = gsi_prepare_trbs(ep, request);
1355 break;
1356 case GSI_EP_OP_FREE_TRBS:
1357 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1358 gsi_free_trbs(ep);
1359 break;
1360 case GSI_EP_OP_CONFIG:
1361 request = (struct usb_gsi_request *)op_data;
1362 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001363 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001364 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001365 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001366 break;
1367 case GSI_EP_OP_STARTXFER:
1368 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001369 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001370 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001371 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001372 break;
1373 case GSI_EP_OP_GET_XFER_IDX:
1374 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1375 ret = gsi_get_xfer_index(ep);
1376 break;
1377 case GSI_EP_OP_STORE_DBL_INFO:
1378 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1379 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1380 break;
1381 case GSI_EP_OP_ENABLE_GSI:
1382 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1383 gsi_enable(ep);
1384 break;
1385 case GSI_EP_OP_GET_CH_INFO:
1386 ch_info = (struct gsi_channel_info *)op_data;
1387 gsi_get_channel_info(ep, ch_info);
1388 break;
Mayank Rana64d136b2016-11-01 21:01:34 -07001389 case GSI_EP_OP_RING_DB:
Mayank Rana511f3b22016-08-02 12:00:11 -07001390 request = (struct usb_gsi_request *)op_data;
Mayank Rana64d136b2016-11-01 21:01:34 -07001391 dbg_print(0xFF, "RING_DB", 0, ep->name);
1392 gsi_ring_db(ep, request);
Mayank Rana511f3b22016-08-02 12:00:11 -07001393 break;
1394 case GSI_EP_OP_UPDATEXFER:
1395 request = (struct usb_gsi_request *)op_data;
1396 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001397 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001398 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001399 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001400 break;
1401 case GSI_EP_OP_ENDXFER:
1402 request = (struct usb_gsi_request *)op_data;
1403 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001404 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001405 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001406 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001407 break;
1408 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1409 block_db = *((bool *)op_data);
1410 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1411 block_db);
1412 gsi_set_clear_dbell(ep, block_db);
1413 break;
1414 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1415 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1416 f_suspend = *((bool *)op_data);
1417 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1418 break;
1419 case GSI_EP_OP_DISABLE:
1420 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1421 ret = ep->ops->disable(ep);
1422 break;
1423 default:
1424 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1425 }
1426
1427 return ret;
1428}
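
/*
 * Illustrative call sequence a GSI-capable function driver might issue
 * through this hook (not the only possible ordering):
 *
 *	GSI_EP_OP_PREPARE_TRBS	- allocate and lay out the TRB ring
 *	GSI_EP_OP_CONFIG	- DEPCFG with the GSI interrupter number
 *	GSI_EP_OP_STARTXFER	- StartXfer; XferRscIndex is latched
 *	GSI_EP_OP_STORE_DBL_INFO / GSI_EP_OP_ENABLE_GSI / GSI_EP_OP_RING_DB
 *	...
 *	GSI_EP_OP_ENDXFER	- stop the active transfer
 *	GSI_EP_OP_FREE_TRBS	- release the TRB ring
 */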
1429
1430/**
1431 * Configure MSM endpoint.
 1432 * This function applies configurations specific
 1433 * to an endpoint which needs a specific implementation
 1434 * in the MSM architecture.
 1435 *
 1436 * This function should be called by a usb function/class
 1437 * layer which needs support from the MSM-specific HW
 1438 * that wraps the USB3 core (like GSI or DBM specific endpoints).
1439 *
1440 * @ep - a pointer to some usb_ep instance
1441 *
 1442 * @return int - 0 on success, negative on error.
1443 */
1444int msm_ep_config(struct usb_ep *ep)
1445{
1446 struct dwc3_ep *dep = to_dwc3_ep(ep);
1447 struct dwc3 *dwc = dep->dwc;
1448 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1449 struct usb_ep_ops *new_ep_ops;
1450
1451
1452 /* Save original ep ops for future restore*/
1453 if (mdwc->original_ep_ops[dep->number]) {
1454 dev_err(mdwc->dev,
1455 "ep [%s,%d] already configured as msm endpoint\n",
1456 ep->name, dep->number);
1457 return -EPERM;
1458 }
1459 mdwc->original_ep_ops[dep->number] = ep->ops;
1460
1461 /* Set new usb ops as we like */
1462 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1463 if (!new_ep_ops)
1464 return -ENOMEM;
1465
1466 (*new_ep_ops) = (*ep->ops);
1467 new_ep_ops->queue = dwc3_msm_ep_queue;
1468 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1469 ep->ops = new_ep_ops;
1470
1471 /*
1472 * Do HERE more usb endpoint configurations
1473 * which are specific to MSM.
1474 */
1475
1476 return 0;
1477}
1478EXPORT_SYMBOL(msm_ep_config);
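
/*
 * Minimal usage sketch for a function driver (caller-side details are
 * illustrative only): after enabling the endpoint, swap in the MSM ops
 * and mark each request as an SPS/DBM request so dwc3_msm_ep_queue()
 * routes it to the DBM instead of the regular dwc3 queue path:
 *
 *	msm_ep_config(ep);
 *	request->udc_priv |= MSM_SPS_MODE;	// BAM pipe id goes in the
 *						// MSM_PIPE_ID_MASK bits
 *	usb_ep_queue(ep, request, GFP_ATOMIC);
 *
 * msm_ep_unconfig(ep) below restores the original endpoint ops.
 */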
1479
1480/**
1481 * Un-configure MSM endpoint.
 1482 * Tears down the configuration done by the
 1483 * msm_ep_config function.
1484 *
1485 * @ep - a pointer to some usb_ep instance
1486 *
1487 * @return int - 0 on success, negative on error.
1488 */
1489int msm_ep_unconfig(struct usb_ep *ep)
1490{
1491 struct dwc3_ep *dep = to_dwc3_ep(ep);
1492 struct dwc3 *dwc = dep->dwc;
1493 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1494 struct usb_ep_ops *old_ep_ops;
1495
1496 /* Restore original ep ops */
1497 if (!mdwc->original_ep_ops[dep->number]) {
1498 dev_err(mdwc->dev,
1499 "ep [%s,%d] was not configured as msm endpoint\n",
1500 ep->name, dep->number);
1501 return -EINVAL;
1502 }
1503 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1504 ep->ops = mdwc->original_ep_ops[dep->number];
1505 mdwc->original_ep_ops[dep->number] = NULL;
1506 kfree(old_ep_ops);
1507
1508 /*
1509 * Do HERE more usb endpoint un-configurations
1510 * which are specific to MSM.
1511 */
1512
1513 return 0;
1514}
1515EXPORT_SYMBOL(msm_ep_unconfig);
1516#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1517
1518static void dwc3_resume_work(struct work_struct *w);
1519
1520static void dwc3_restart_usb_work(struct work_struct *w)
1521{
1522 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1523 restart_usb_work);
1524 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1525 unsigned int timeout = 50;
1526
1527 dev_dbg(mdwc->dev, "%s\n", __func__);
1528
1529 if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
1530 dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
1531 return;
1532 }
1533
1534 /* guard against concurrent VBUS handling */
1535 mdwc->in_restart = true;
1536
1537 if (!mdwc->vbus_active) {
1538 dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
1539 dwc->err_evt_seen = false;
1540 mdwc->in_restart = false;
1541 return;
1542 }
1543
Mayank Rana08e41922017-03-02 15:25:48 -08001544 dbg_event(0xFF, "RestartUSB", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07001545 /* Reset active USB connection */
1546 dwc3_resume_work(&mdwc->resume_work);
1547
1548 /* Make sure disconnect is processed before sending connect */
1549 while (--timeout && !pm_runtime_suspended(mdwc->dev))
1550 msleep(20);
1551
1552 if (!timeout) {
1553 dev_dbg(mdwc->dev,
1554 "Not in LPM after disconnect, forcing suspend...\n");
Mayank Rana08e41922017-03-02 15:25:48 -08001555 dbg_event(0xFF, "ReStart:RT SUSP",
1556 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07001557 pm_runtime_suspend(mdwc->dev);
1558 }
1559
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301560 mdwc->in_restart = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07001561 /* Force reconnect only if cable is still connected */
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301562 if (mdwc->vbus_active)
Mayank Rana511f3b22016-08-02 12:00:11 -07001563 dwc3_resume_work(&mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001564
1565 dwc->err_evt_seen = false;
1566 flush_delayed_work(&mdwc->sm_work);
1567}
1568
Manu Gautam976fdfc2016-08-18 09:27:35 +05301569static int msm_dwc3_usbdev_notify(struct notifier_block *self,
1570 unsigned long action, void *priv)
1571{
1572 struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
1573 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1574 struct usb_bus *bus = priv;
1575
1576 /* Interested only in recovery when HC dies */
1577 if (action != USB_BUS_DIED)
1578 return 0;
1579
1580 dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
1581 /* Recovery already under process */
1582 if (mdwc->hc_died)
1583 return 0;
1584
1585 if (bus->controller != &dwc->xhci->dev) {
1586 dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
1587 return 0;
1588 }
1589
1590 mdwc->hc_died = true;
1591 schedule_delayed_work(&mdwc->sm_work, 0);
1592 return 0;
1593}
1594
1595
Mayank Rana511f3b22016-08-02 12:00:11 -07001596/*
1597 * Check whether the DWC3 requires resetting the ep
1598 * after going to Low Power Mode (lpm)
1599 */
1600bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1601{
1602 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1603 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1604
1605 return dbm_reset_ep_after_lpm(mdwc->dbm);
1606}
1607EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1608
1609/*
1610 * Config Global Distributed Switch Controller (GDSC)
1611 * to support controller power collapse
1612 */
1613static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1614{
1615 int ret;
1616
1617 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1618 return -EPERM;
1619
1620 if (on) {
1621 ret = regulator_enable(mdwc->dwc3_gdsc);
1622 if (ret) {
1623 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1624 return ret;
1625 }
1626 } else {
1627 ret = regulator_disable(mdwc->dwc3_gdsc);
1628 if (ret) {
1629 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1630 return ret;
1631 }
1632 }
1633
1634 return ret;
1635}
1636
1637static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
1638{
1639 int ret = 0;
1640
1641 if (assert) {
Mayank Ranad339abe2017-05-31 09:19:49 -07001642 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001643	/* Use asynchronous block reset of the hardware */
1644 dev_dbg(mdwc->dev, "block_reset ASSERT\n");
1645 clk_disable_unprepare(mdwc->utmi_clk);
1646 clk_disable_unprepare(mdwc->sleep_clk);
1647 clk_disable_unprepare(mdwc->core_clk);
1648 clk_disable_unprepare(mdwc->iface_clk);
Amit Nischal4d278212016-06-06 17:54:34 +05301649 ret = reset_control_assert(mdwc->core_reset);
Mayank Rana511f3b22016-08-02 12:00:11 -07001650 if (ret)
Amit Nischal4d278212016-06-06 17:54:34 +05301651 dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001652 } else {
1653 dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
Amit Nischal4d278212016-06-06 17:54:34 +05301654 ret = reset_control_deassert(mdwc->core_reset);
1655 if (ret)
1656 dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001657 ndelay(200);
1658 clk_prepare_enable(mdwc->iface_clk);
1659 clk_prepare_enable(mdwc->core_clk);
1660 clk_prepare_enable(mdwc->sleep_clk);
1661 clk_prepare_enable(mdwc->utmi_clk);
Mayank Ranad339abe2017-05-31 09:19:49 -07001662 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001663 }
1664
1665 return ret;
1666}
1667
1668static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
1669{
1670 u32 guctl, gfladj = 0;
1671
1672 guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
1673 guctl &= ~DWC3_GUCTL_REFCLKPER;
1674
1675 /* GFLADJ register is used starting with revision 2.50a */
1676 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
1677 gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
1678 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1679 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
1680 gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
1681 gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
1682 }
1683
1684 /* Refer to SNPS Databook Table 6-55 for calculations used */
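	/*
	 * Derivation sketch (assumption based on the databook formulas):
	 * REFCLKPER is the ref clock period in ns (1e9 / 19.2MHz ~= 52ns,
	 * 1e9 / 24MHz ~= 41ns) and 240MHZ_DECR is 240MHz divided by the
	 * ref clock rate (240 / 19.2 = 12.5, encoded as 12 plus the PLS1
	 * bit for the 0.5; 240 / 24 = 10).
	 */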
1685 switch (mdwc->utmi_clk_rate) {
1686 case 19200000:
1687 guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
1688 gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1689 gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1690 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1691 gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1692 break;
1693 case 24000000:
1694 guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
1695 gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1696 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1697 gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1698 break;
1699 default:
1700 dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
1701 mdwc->utmi_clk_rate);
1702 break;
1703 }
1704
1705 dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
1706 if (gfladj)
1707 dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
1708}
1709
1710/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
1711static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
1712{
1713 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
1714 /* On older cores set XHCI_REV bit to specify revision 1.0 */
1715 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1716 BIT(2), 1);
1717
1718 /*
1719 * Enable master clock for RAMs to allow BAM to access RAMs when
1720 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
1721 * are seen where RAM clocks get turned OFF in SS mode
1722 */
1723 dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
1724 dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
1725
1726}
1727
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001728static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1729{
1730 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1731 vbus_draw_work);
1732 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1733
1734 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1735}
1736
Mayank Rana511f3b22016-08-02 12:00:11 -07001737static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1738{
1739 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Ranaf4918d32016-12-15 13:35:55 -08001740 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001741 u32 reg;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001742 int i;
Mayank Rana511f3b22016-08-02 12:00:11 -07001743
1744 switch (event) {
1745 case DWC3_CONTROLLER_ERROR_EVENT:
1746 dev_info(mdwc->dev,
1747 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1748 dwc->irq_cnt);
1749
1750 dwc3_gadget_disable_irq(dwc);
1751
1752 /* prevent core from generating interrupts until recovery */
1753 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1754 reg |= DWC3_GCTL_CORESOFTRESET;
1755 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1756
1757 /* restart USB which performs full reset and reconnect */
1758 schedule_work(&mdwc->restart_usb_work);
1759 break;
1760 case DWC3_CONTROLLER_RESET_EVENT:
1761 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1762 /* HS & SSPHYs get reset as part of core soft reset */
1763 dwc3_msm_qscratch_reg_init(mdwc);
1764 break;
1765 case DWC3_CONTROLLER_POST_RESET_EVENT:
1766 dev_dbg(mdwc->dev,
1767 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1768
1769 /*
 1770		 * The sequence below is used when the controller operates without
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301771		 * an SS PHY and only USB high/full speed is supported.
Mayank Rana511f3b22016-08-02 12:00:11 -07001772 */
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301773 if (dwc->maximum_speed == USB_SPEED_HIGH ||
1774 dwc->maximum_speed == USB_SPEED_FULL) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001775 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1776 dwc3_msm_read_reg(mdwc->base,
1777 QSCRATCH_GENERAL_CFG)
1778 | PIPE_UTMI_CLK_DIS);
1779
1780 usleep_range(2, 5);
1781
1782
1783 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1784 dwc3_msm_read_reg(mdwc->base,
1785 QSCRATCH_GENERAL_CFG)
1786 | PIPE_UTMI_CLK_SEL
1787 | PIPE3_PHYSTATUS_SW);
1788
1789 usleep_range(2, 5);
1790
1791 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1792 dwc3_msm_read_reg(mdwc->base,
1793 QSCRATCH_GENERAL_CFG)
1794 & ~PIPE_UTMI_CLK_DIS);
1795 }
1796
1797 dwc3_msm_update_ref_clk(mdwc);
1798 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1799 break;
1800 case DWC3_CONTROLLER_CONNDONE_EVENT:
1801 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1802 /*
 1803		 * Enable the power event interrupt if the DBM indicates exit
 1804		 * from L1 via interrupt
1805 */
1806 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1807 dwc3_msm_write_reg_field(mdwc->base,
1808 PWR_EVNT_IRQ_MASK_REG,
1809 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1810
1811 atomic_set(&dwc->in_lpm, 0);
1812 break;
1813 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1814 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1815 if (dwc->enable_bus_suspend) {
1816 mdwc->suspend = dwc->b_suspend;
1817 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1818 }
1819 break;
1820 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1821 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001822 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001823 break;
1824 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1825 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
Hemant Kumar43874172016-08-25 16:17:48 -07001826 schedule_work(&mdwc->restart_usb_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001827 break;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001828 case DWC3_GSI_EVT_BUF_ALLOC:
1829 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
1830
1831 if (!mdwc->num_gsi_event_buffers)
1832 break;
1833
1834 mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
1835 sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
1836 GFP_KERNEL);
1837 if (!mdwc->gsi_ev_buff) {
1838 dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
1839 break;
1840 }
1841
1842 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1843
1844 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
1845 if (!evt)
1846 break;
1847 evt->dwc = dwc;
1848 evt->length = DWC3_EVENT_BUFFERS_SIZE;
Mayank Rana0e4c4432017-09-18 16:46:00 -07001849 evt->buf = dma_alloc_coherent(dwc->sysdev,
Mayank Ranaf4918d32016-12-15 13:35:55 -08001850 DWC3_EVENT_BUFFERS_SIZE,
1851 &evt->dma, GFP_KERNEL);
1852 if (!evt->buf) {
1853 dev_err(dwc->dev,
1854 "can't allocate gsi_evt_buf(%d)\n", i);
1855 break;
1856 }
1857 mdwc->gsi_ev_buff[i] = evt;
1858 }
1859 break;
1860 case DWC3_GSI_EVT_BUF_SETUP:
1861 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
1862 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1863 evt = mdwc->gsi_ev_buff[i];
Mayank Rana0eb0db72017-10-03 13:46:32 -07001864 if (!evt)
1865 break;
1866
Mayank Ranaf4918d32016-12-15 13:35:55 -08001867 dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
1868 evt->buf, (unsigned long long) evt->dma,
1869 evt->length);
1870 memset(evt->buf, 0, evt->length);
1871 evt->lpos = 0;
1872 /*
1873 * Primary event buffer is programmed with registers
1874 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1875 * program USB GSI related event buffer with DWC3
1876 * controller.
1877 */
1878 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
1879 lower_32_bits(evt->dma));
1880 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
1881 DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
1882 DWC3_GEVENT_TYPE_GSI) |
1883 DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
1884 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1885 DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
1886 ((evt->length) & 0xffff));
1887 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1888 }
1889 break;
1890 case DWC3_GSI_EVT_BUF_CLEANUP:
1891 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
Mayank Rana0eb0db72017-10-03 13:46:32 -07001892 if (!mdwc->gsi_ev_buff)
1893 break;
1894
Mayank Ranaf4918d32016-12-15 13:35:55 -08001895 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1896 evt = mdwc->gsi_ev_buff[i];
1897 evt->lpos = 0;
1898 /*
1899 * Primary event buffer is programmed with registers
1900 * DWC3_GEVNT*(0). Hence use DWC3_GEVNT*(i+1) to
1901 * program USB GSI related event buffer with DWC3
1902 * controller.
1903 */
1904 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
1905 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
1906 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1907 DWC3_GEVNTSIZ_INTMASK |
1908 DWC3_GEVNTSIZ_SIZE((i+1)));
1909 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1910 }
1911 break;
1912 case DWC3_GSI_EVT_BUF_FREE:
1913 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
Mayank Rana0eb0db72017-10-03 13:46:32 -07001914 if (!mdwc->gsi_ev_buff)
1915 break;
1916
Mayank Ranaf4918d32016-12-15 13:35:55 -08001917 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1918 evt = mdwc->gsi_ev_buff[i];
1919 if (evt)
Mayank Rana0e4c4432017-09-18 16:46:00 -07001920 dma_free_coherent(dwc->sysdev, evt->length,
Mayank Ranaf4918d32016-12-15 13:35:55 -08001921 evt->buf, evt->dma);
1922 }
1923 break;
Mayank Rana511f3b22016-08-02 12:00:11 -07001924 default:
1925 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1926 break;
1927 }
1928}
1929
1930static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
1931{
1932 int ret = 0;
1933
1934 if (core_reset) {
1935 ret = dwc3_msm_link_clk_reset(mdwc, 1);
1936 if (ret)
1937 return;
1938
1939 usleep_range(1000, 1200);
1940 ret = dwc3_msm_link_clk_reset(mdwc, 0);
1941 if (ret)
1942 return;
1943
1944 usleep_range(10000, 12000);
1945 }
1946
1947 if (mdwc->dbm) {
1948 /* Reset the DBM */
1949 dbm_soft_reset(mdwc->dbm, 1);
1950 usleep_range(1000, 1200);
1951 dbm_soft_reset(mdwc->dbm, 0);
1952
 1953		/* enable DBM */
1954 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1955 DBM_EN_MASK, 0x1);
1956 dbm_enable(mdwc->dbm);
1957 }
1958}
1959
1960static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
1961{
1962 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1963 u32 val;
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301964 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001965
1966 /* Configure AHB2PHY for one wait state read/write */
1967 if (mdwc->ahb2phy_base) {
1968 clk_prepare_enable(mdwc->cfg_ahb_clk);
1969 val = readl_relaxed(mdwc->ahb2phy_base +
1970 PERIPH_SS_AHB2PHY_TOP_CFG);
1971 if (val != ONE_READ_WRITE_WAIT) {
1972 writel_relaxed(ONE_READ_WRITE_WAIT,
1973 mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
1974 /* complete above write before configuring USB PHY. */
1975 mb();
1976 }
1977 clk_disable_unprepare(mdwc->cfg_ahb_clk);
1978 }
1979
1980 if (!mdwc->init) {
Mayank Rana08e41922017-03-02 15:25:48 -08001981 dbg_event(0xFF, "dwc3 init",
1982 atomic_read(&mdwc->dev->power.usage_count));
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301983 ret = dwc3_core_pre_init(dwc);
1984 if (ret) {
1985 dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
1986 return;
1987 }
Mayank Rana511f3b22016-08-02 12:00:11 -07001988 mdwc->init = true;
1989 }
1990
1991 dwc3_core_init(dwc);
1992 /* Re-configure event buffers */
1993 dwc3_event_buffers_setup(dwc);
1994}
1995
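/*
 * Overview of the suspend preparation below: when a SuperSpeed session is
 * active the link must already be in P3, then the HS PHY is asked to enter
 * L2 by setting ENBLSLPM/SUSPHY in GUSB2PHYCFG and polling
 * PWR_EVNT_IRQ_STAT_REG (for roughly 5ms) for the L2-entry event.
 */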
1996static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
1997{
1998 unsigned long timeout;
1999 u32 reg = 0;
2000
2001 if ((mdwc->in_host_mode || mdwc->vbus_active)
Vijayavardhan Vennapusa8cf91a62016-09-01 12:05:50 +05302002 && dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002003 if (!atomic_read(&mdwc->in_p3)) {
 2004			dev_err(mdwc->dev, "Not in P3, aborting LPM sequence\n");
2005 return -EBUSY;
2006 }
2007 }
2008
2009 /* Clear previous L2 events */
2010 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2011 PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
2012
2013 /* Prepare HSPHY for suspend */
2014 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
2015 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2016 reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
2017
2018 /* Wait for PHY to go into L2 */
2019 timeout = jiffies + msecs_to_jiffies(5);
2020 while (!time_after(jiffies, timeout)) {
2021 reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2022 if (reg & PWR_EVNT_LPM_IN_L2_MASK)
2023 break;
2024 }
2025 if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
2026 dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
2027
2028 /* Clear L2 event bit */
2029 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2030 PWR_EVNT_LPM_IN_L2_MASK);
2031
2032 return 0;
2033}
2034
Mayank Rana511f3b22016-08-02 12:00:11 -07002035static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
2036{
2037 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2038 int i, num_ports;
2039 u32 reg;
2040
2041 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2042 if (mdwc->in_host_mode) {
2043 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
2044 num_ports = HCS_MAX_PORTS(reg);
2045 for (i = 0; i < num_ports; i++) {
2046 reg = dwc3_msm_read_reg(mdwc->base,
2047 USB3_PORTSC + i*0x10);
2048 if (reg & PORT_PE) {
2049 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
2050 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2051 else if (DEV_LOWSPEED(reg))
2052 mdwc->hs_phy->flags |= PHY_LS_MODE;
2053 }
2054 }
2055 } else {
2056 if (dwc->gadget.speed == USB_SPEED_HIGH ||
2057 dwc->gadget.speed == USB_SPEED_FULL)
2058 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2059 else if (dwc->gadget.speed == USB_SPEED_LOW)
2060 mdwc->hs_phy->flags |= PHY_LS_MODE;
2061 }
2062}
2063
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302064static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
2065 bool perf_mode);
Mayank Rana511f3b22016-08-02 12:00:11 -07002066
Mayank Ranad339abe2017-05-31 09:19:49 -07002067static void configure_usb_wakeup_interrupt(struct dwc3_msm *mdwc,
2068 struct usb_irq *uirq, unsigned int polarity, bool enable)
2069{
2070 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2071
2072 if (uirq && enable && !uirq->enable) {
2073 dbg_event(0xFF, "PDC_IRQ_EN", uirq->irq);
2074 dbg_event(0xFF, "PDC_IRQ_POL", polarity);
2075 /* clear any pending interrupt */
2076 irq_set_irqchip_state(uirq->irq, IRQCHIP_STATE_PENDING, 0);
2077 irq_set_irq_type(uirq->irq, polarity);
2078 enable_irq_wake(uirq->irq);
2079 enable_irq(uirq->irq);
2080 uirq->enable = true;
2081 }
2082
2083 if (uirq && !enable && uirq->enable) {
2084 dbg_event(0xFF, "PDC_IRQ_DIS", uirq->irq);
2085 disable_irq_wake(uirq->irq);
2086 disable_irq_nosync(uirq->irq);
2087 uirq->enable = false;
2088 }
2089}
2090
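/*
 * Wakeup interrupt polarity below follows the idle line state (assumed
 * rationale): in LS mode DM is pulled up and in FS/HS mode DP is pulled up,
 * so a falling edge on that line indicates bus activity; with nothing
 * attached both lines idle low, so rising edges are used to detect attach.
 */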
2091static void enable_usb_pdc_interrupt(struct dwc3_msm *mdwc, bool enable)
2092{
2093 if (!enable)
2094 goto disable_usb_irq;
2095
2096 if (mdwc->hs_phy->flags & PHY_LS_MODE) {
2097 configure_usb_wakeup_interrupt(mdwc,
2098 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2099 IRQ_TYPE_EDGE_FALLING, enable);
2100 } else if (mdwc->hs_phy->flags & PHY_HSFS_MODE) {
2101 configure_usb_wakeup_interrupt(mdwc,
2102 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2103 IRQ_TYPE_EDGE_FALLING, enable);
2104 } else {
2105 configure_usb_wakeup_interrupt(mdwc,
2106 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2107 IRQ_TYPE_EDGE_RISING, true);
2108 configure_usb_wakeup_interrupt(mdwc,
2109 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2110 IRQ_TYPE_EDGE_RISING, true);
2111 }
2112
2113 configure_usb_wakeup_interrupt(mdwc,
2114 &mdwc->wakeup_irq[SS_PHY_IRQ],
2115 IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH, enable);
2116 return;
2117
2118disable_usb_irq:
2119 configure_usb_wakeup_interrupt(mdwc,
2120 &mdwc->wakeup_irq[DP_HS_PHY_IRQ], 0, enable);
2121 configure_usb_wakeup_interrupt(mdwc,
2122 &mdwc->wakeup_irq[DM_HS_PHY_IRQ], 0, enable);
2123 configure_usb_wakeup_interrupt(mdwc,
2124 &mdwc->wakeup_irq[SS_PHY_IRQ], 0, enable);
2125}
2126
2127static void configure_nonpdc_usb_interrupt(struct dwc3_msm *mdwc,
2128 struct usb_irq *uirq, bool enable)
2129{
2130 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2131
2132 if (uirq && enable && !uirq->enable) {
2133 dbg_event(0xFF, "IRQ_EN", uirq->irq);
2134 enable_irq_wake(uirq->irq);
2135 enable_irq(uirq->irq);
2136 uirq->enable = true;
2137 }
2138
2139 if (uirq && !enable && uirq->enable) {
2140 dbg_event(0xFF, "IRQ_DIS", uirq->irq);
2141 disable_irq_wake(uirq->irq);
2142 disable_irq_nosync(uirq->irq);
 2143		uirq->enable = false;
2144 }
2145}
2146
Mayank Rana511f3b22016-08-02 12:00:11 -07002147static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
2148{
Mayank Rana83ad5822016-08-09 14:17:22 -07002149 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07002150 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana83ad5822016-08-09 14:17:22 -07002151 struct dwc3_event_buffer *evt;
Mayank Ranad339abe2017-05-31 09:19:49 -07002152 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002153
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302154 mutex_lock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002155 if (atomic_read(&dwc->in_lpm)) {
2156 dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302157 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002158 return 0;
2159 }
2160
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302161 cancel_delayed_work_sync(&mdwc->perf_vote_work);
2162 msm_dwc3_perf_vote_update(mdwc, false);
2163
Mayank Rana511f3b22016-08-02 12:00:11 -07002164 if (!mdwc->in_host_mode) {
Mayank Rana83ad5822016-08-09 14:17:22 -07002165 evt = dwc->ev_buf;
2166 if ((evt->flags & DWC3_EVENT_PENDING)) {
2167 dev_dbg(mdwc->dev,
Mayank Rana511f3b22016-08-02 12:00:11 -07002168 "%s: %d device events pending, abort suspend\n",
2169 __func__, evt->count / 4);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302170 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana83ad5822016-08-09 14:17:22 -07002171 return -EBUSY;
Mayank Rana511f3b22016-08-02 12:00:11 -07002172 }
2173 }
2174
2175 if (!mdwc->vbus_active && dwc->is_drd &&
2176 mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
2177 /*
2178 * In some cases, the pm_runtime_suspend may be called by
2179 * usb_bam when there is pending lpm flag. However, if this is
2180 * done when cable was disconnected and otg state has not
2181 * yet changed to IDLE, then it means OTG state machine
2182 * is running and we race against it. So cancel LPM for now,
2183 * and OTG state machine will go for LPM later, after completing
2184 * transition to IDLE state.
2185 */
2186 dev_dbg(mdwc->dev,
2187 "%s: cable disconnected while not in idle otg state\n",
2188 __func__);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302189 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002190 return -EBUSY;
2191 }
2192
2193 /*
 2194	 * If the device is not in the CONFIGURED state while the bus is
 2195	 * suspended (L2), break the LPM sequence. This check applies to
 2196	 * the device bus suspend case.
2197 */
2198 if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
2199 (dwc->gadget.state != USB_STATE_CONFIGURED)) {
2200 pr_err("%s(): Trying to go in LPM with state:%d\n",
2201 __func__, dwc->gadget.state);
2202 pr_err("%s(): LPM is not performed.\n", __func__);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302203 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002204 return -EBUSY;
2205 }
2206
2207 ret = dwc3_msm_prepare_suspend(mdwc);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302208 if (ret) {
2209 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002210 return ret;
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302211 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002212
Mayank Rana511f3b22016-08-02 12:00:11 -07002213 /* Disable core irq */
2214 if (dwc->irq)
2215 disable_irq(dwc->irq);
2216
Mayank Ranaf616a7f2017-03-20 16:10:39 -07002217 if (work_busy(&dwc->bh_work))
2218 dbg_event(0xFF, "pend evt", 0);
2219
Mayank Rana511f3b22016-08-02 12:00:11 -07002220	/* disable power event irq; hs and ss phy irqs are used as wakeup sources */
Mayank Ranad339abe2017-05-31 09:19:49 -07002221 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07002222
2223 dwc3_set_phy_speed_flags(mdwc);
2224 /* Suspend HS PHY */
2225 usb_phy_set_suspend(mdwc->hs_phy, 1);
2226
2227 /* Suspend SS PHY */
Mayank Rana17f67e32017-08-15 10:41:28 -07002228 if (dwc->maximum_speed == USB_SPEED_SUPER) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002229 /* indicate phy about SS mode */
2230 if (dwc3_msm_is_superspeed(mdwc))
2231 mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
2232 usb_phy_set_suspend(mdwc->ss_phy, 1);
2233 mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
2234 }
2235
2236 /* make sure above writes are completed before turning off clocks */
2237 wmb();
2238
2239 /* Disable clocks */
2240 if (mdwc->bus_aggr_clk)
2241 clk_disable_unprepare(mdwc->bus_aggr_clk);
2242 clk_disable_unprepare(mdwc->utmi_clk);
2243
Hemant Kumar633dc332016-08-10 13:41:05 -07002244 /* Memory core: OFF, Memory periphery: OFF */
2245 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
2246 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
2247 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
2248 }
2249
Mayank Rana511f3b22016-08-02 12:00:11 -07002250 clk_set_rate(mdwc->core_clk, 19200000);
2251 clk_disable_unprepare(mdwc->core_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302252 if (mdwc->noc_aggr_clk)
2253 clk_disable_unprepare(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07002254 /*
2255 * Disable iface_clk only after core_clk as core_clk has FSM
 2256	 * dependency on iface_clk. Hence iface_clk should be turned off
2257 * after core_clk is turned off.
2258 */
2259 clk_disable_unprepare(mdwc->iface_clk);
 2260	/* USB PHY no longer requires TCXO */
2261 clk_disable_unprepare(mdwc->xo_clk);
2262
2263 /* Perform controller power collapse */
Azhar Shaikh69f4c052016-02-11 11:00:58 -08002264 if (!mdwc->in_host_mode && (!mdwc->vbus_active || mdwc->in_restart)) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002265 mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
2266 dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
2267 dwc3_msm_config_gdsc(mdwc, 0);
2268 clk_disable_unprepare(mdwc->sleep_clk);
Jack Phambbe27962017-03-23 18:42:26 -07002269
Jack Pham9faa51df2017-04-03 18:13:40 -07002270 if (mdwc->iommu_map) {
Jack Phambbe27962017-03-23 18:42:26 -07002271 arm_iommu_detach_device(mdwc->dev);
Jack Pham9faa51df2017-04-03 18:13:40 -07002272 dev_dbg(mdwc->dev, "IOMMU detached\n");
2273 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002274 }
2275
2276 /* Remove bus voting */
2277 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002278 dbg_event(0xFF, "bus_devote_start", 0);
2279 ret = msm_bus_scale_client_update_request(
2280 mdwc->bus_perf_client, 0);
2281 dbg_event(0xFF, "bus_devote_finish", 0);
2282 if (ret)
2283 dev_err(mdwc->dev, "bus bw unvoting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002284 }
2285
2286 /*
 2287	 * Release the wakeup source with a timeout to defer system suspend and
 2288	 * handle the case where both SUSPEND and DISCONNECT events are received
 2289	 * on USB cable disconnect.
2290 */
2291 if (mdwc->lpm_to_suspend_delay) {
2292 dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
2293 mdwc->lpm_to_suspend_delay);
2294 pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
2295 } else {
2296 pm_relax(mdwc->dev);
2297 }
2298
2299 atomic_set(&dwc->in_lpm, 1);
2300
2301 /*
 2302	 * With DCP or during cable disconnect, we don't require wakeup
2303 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
2304 * case of host bus suspend and device bus suspend.
2305 */
2306 if (mdwc->vbus_active || mdwc->in_host_mode) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002307 if (mdwc->use_pdc_interrupts) {
2308 enable_usb_pdc_interrupt(mdwc, true);
2309 } else {
2310 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2311 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
2312 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2313 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
Mayank Rana511f3b22016-08-02 12:00:11 -07002314 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002315 mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2316 }
2317
2318 dev_info(mdwc->dev, "DWC3 in low power mode\n");
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302319 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002320 return 0;
2321}
2322
2323static int dwc3_msm_resume(struct dwc3_msm *mdwc)
2324{
2325 int ret;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002326 long core_clk_rate;
Mayank Rana511f3b22016-08-02 12:00:11 -07002327 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Ranad339abe2017-05-31 09:19:49 -07002328 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002329
2330 dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
2331
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302332 mutex_lock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002333 if (!atomic_read(&dwc->in_lpm)) {
2334 dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302335 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002336 return 0;
2337 }
2338
2339 pm_stay_awake(mdwc->dev);
2340
2341 /* Enable bus voting */
2342 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002343 dbg_event(0xFF, "bus_vote_start", 1);
2344 ret = msm_bus_scale_client_update_request(
2345 mdwc->bus_perf_client, 1);
2346 dbg_event(0xFF, "bus_vote_finish", 1);
2347 if (ret)
2348 dev_err(mdwc->dev, "bus bw voting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002349 }
2350
2351 /* Vote for TCXO while waking up USB HSPHY */
2352 ret = clk_prepare_enable(mdwc->xo_clk);
2353 if (ret)
 2354		dev_err(mdwc->dev, "%s failed to vote TCXO buffer %d\n",
2355 __func__, ret);
2356
2357 /* Restore controller power collapse */
2358 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2359 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2360 dwc3_msm_config_gdsc(mdwc, 1);
Amit Nischal4d278212016-06-06 17:54:34 +05302361 ret = reset_control_assert(mdwc->core_reset);
2362 if (ret)
2363 dev_err(mdwc->dev, "%s:core_reset assert failed\n",
2364 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002365 /* HW requires a short delay for reset to take place properly */
2366 usleep_range(1000, 1200);
Amit Nischal4d278212016-06-06 17:54:34 +05302367 ret = reset_control_deassert(mdwc->core_reset);
2368 if (ret)
2369 dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
2370 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002371 clk_prepare_enable(mdwc->sleep_clk);
2372 }
2373
2374 /*
2375 * Enable clocks
 2376	 * Turn ON iface_clk before core_clk due to FSM dependency.
2377 */
2378 clk_prepare_enable(mdwc->iface_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302379 if (mdwc->noc_aggr_clk)
2380 clk_prepare_enable(mdwc->noc_aggr_clk);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002381
2382 core_clk_rate = mdwc->core_clk_rate;
2383 if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
2384 core_clk_rate = mdwc->core_clk_rate_hs;
2385 dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
2386 core_clk_rate);
2387 }
2388
2389 clk_set_rate(mdwc->core_clk, core_clk_rate);
Mayank Rana511f3b22016-08-02 12:00:11 -07002390 clk_prepare_enable(mdwc->core_clk);
Hemant Kumar5fa38932016-10-27 11:58:37 -07002391
2392 /* set Memory core: ON, Memory periphery: ON */
2393 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
2394 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);
2395
Mayank Rana511f3b22016-08-02 12:00:11 -07002396 clk_prepare_enable(mdwc->utmi_clk);
2397 if (mdwc->bus_aggr_clk)
2398 clk_prepare_enable(mdwc->bus_aggr_clk);
2399
2400 /* Resume SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002401 if (dwc->maximum_speed == USB_SPEED_SUPER &&
2402 mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002403 mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
2404 if (mdwc->typec_orientation == ORIENTATION_CC1)
2405 mdwc->ss_phy->flags |= PHY_LANE_A;
2406 if (mdwc->typec_orientation == ORIENTATION_CC2)
2407 mdwc->ss_phy->flags |= PHY_LANE_B;
2408 usb_phy_set_suspend(mdwc->ss_phy, 0);
2409 mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
2410 mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
2411 }
2412
2413 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2414 /* Resume HS PHY */
2415 usb_phy_set_suspend(mdwc->hs_phy, 0);
2416
2417 /* Recover from controller power collapse */
2418 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2419 u32 tmp;
2420
Jack Pham9faa51df2017-04-03 18:13:40 -07002421 if (mdwc->iommu_map) {
2422 ret = arm_iommu_attach_device(mdwc->dev,
2423 mdwc->iommu_map);
2424 if (ret)
2425 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
2426 ret);
2427 else
2428 dev_dbg(mdwc->dev, "attached to IOMMU\n");
2429 }
2430
Mayank Rana511f3b22016-08-02 12:00:11 -07002431 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2432
2433 dwc3_msm_power_collapse_por(mdwc);
2434
2435 /* Get initial P3 status and enable IN_P3 event */
2436 tmp = dwc3_msm_read_reg_field(mdwc->base,
2437 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2438 atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
2439 dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
2440 PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
2441
2442 mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
2443 }
2444
2445 atomic_set(&dwc->in_lpm, 0);
2446
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302447 /* enable power evt irq for IN P3 detection */
Mayank Ranad339abe2017-05-31 09:19:49 -07002448 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302449
Mayank Rana511f3b22016-08-02 12:00:11 -07002450 /* Disable HSPHY auto suspend */
2451 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2452 dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
2453 ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
2454 DWC3_GUSB2PHYCFG_SUSPHY));
2455
 2456	/* Disable wakeup capability for HS_PHY_IRQ & SS_PHY_IRQ if enabled */
2457 if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002458 if (mdwc->use_pdc_interrupts) {
2459 enable_usb_pdc_interrupt(mdwc, false);
2460 } else {
2461 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2462 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
2463 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2464 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07002465 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002466 mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2467 }
2468
2469 dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
2470
Mayank Rana511f3b22016-08-02 12:00:11 -07002471 /* Enable core irq */
2472 if (dwc->irq)
2473 enable_irq(dwc->irq);
2474
2475 /*
2476 * Handle other power events that could not have been handled during
2477 * Low Power Mode
2478 */
2479 dwc3_pwr_event_handler(mdwc);
2480
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302481 if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
2482 schedule_delayed_work(&mdwc->perf_vote_work,
2483 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
2484
Mayank Rana08e41922017-03-02 15:25:48 -08002485 dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302486 mutex_unlock(&mdwc->suspend_resume_mutex);
2487
Mayank Rana511f3b22016-08-02 12:00:11 -07002488 return 0;
2489}
2490
2491/**
2492 * dwc3_ext_event_notify - callback to handle events from external transceiver
2493 *
 2494 * Updates the OTG inputs and schedules the state machine work.
2495 */
2496static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2497{
2498 /* Flush processing any pending events before handling new ones */
2499 flush_delayed_work(&mdwc->sm_work);
2500
2501 if (mdwc->id_state == DWC3_ID_FLOAT) {
2502 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2503 set_bit(ID, &mdwc->inputs);
2504 } else {
2505 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2506 clear_bit(ID, &mdwc->inputs);
2507 }
2508
2509 if (mdwc->vbus_active && !mdwc->in_restart) {
2510 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2511 set_bit(B_SESS_VLD, &mdwc->inputs);
2512 } else {
2513 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2514 clear_bit(B_SESS_VLD, &mdwc->inputs);
2515 }
2516
2517 if (mdwc->suspend) {
2518 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2519 set_bit(B_SUSPEND, &mdwc->inputs);
2520 } else {
2521 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2522 clear_bit(B_SUSPEND, &mdwc->inputs);
2523 }
2524
2525 schedule_delayed_work(&mdwc->sm_work, 0);
2526}
2527
2528static void dwc3_resume_work(struct work_struct *w)
2529{
2530 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana08e41922017-03-02 15:25:48 -08002531 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Jack Pham4e9dff72017-04-04 18:05:53 -07002532 union extcon_property_value val;
2533 unsigned int extcon_id;
2534 struct extcon_dev *edev = NULL;
2535 int ret = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07002536
2537 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2538
Jack Pham4e9dff72017-04-04 18:05:53 -07002539 if (mdwc->vbus_active) {
2540 edev = mdwc->extcon_vbus;
2541 extcon_id = EXTCON_USB;
2542 } else if (mdwc->id_state == DWC3_ID_GROUND) {
2543 edev = mdwc->extcon_id;
2544 extcon_id = EXTCON_USB_HOST;
2545 }
2546
2547 /* Check speed and Type-C polarity values in order to configure PHY */
2548 if (edev && extcon_get_state(edev, extcon_id)) {
2549 ret = extcon_get_property(edev, extcon_id,
2550 EXTCON_PROP_USB_SS, &val);
2551
2552 /* Use default dwc->maximum_speed if speed isn't reported */
2553 if (!ret)
2554 dwc->maximum_speed = (val.intval == 0) ?
2555 USB_SPEED_HIGH : USB_SPEED_SUPER;
2556
2557 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2558 dwc->maximum_speed = dwc->max_hw_supp_speed;
2559
Mayank Ranaf70d8212017-06-12 14:02:07 -07002560 if (override_usb_speed &&
2561 is_valid_usb_speed(dwc, override_usb_speed)) {
2562 dwc->maximum_speed = override_usb_speed;
2563 dbg_event(0xFF, "override_speed", override_usb_speed);
2564 }
2565
Jack Pham4e9dff72017-04-04 18:05:53 -07002566 dbg_event(0xFF, "speed", dwc->maximum_speed);
2567
2568 ret = extcon_get_property(edev, extcon_id,
2569 EXTCON_PROP_USB_TYPEC_POLARITY, &val);
2570 if (ret)
2571 mdwc->typec_orientation = ORIENTATION_NONE;
2572 else
2573 mdwc->typec_orientation = val.intval ?
2574 ORIENTATION_CC2 : ORIENTATION_CC1;
2575
2576 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
2577 }
2578
Mayank Rana511f3b22016-08-02 12:00:11 -07002579 /*
2580 * exit LPM first to meet resume timeline from device side.
2581 * resume_pending flag would prevent calling
2582 * dwc3_msm_resume() in case we are here due to system
2583 * wide resume without usb cable connected. This flag is set
2584 * only in case of power event irq in lpm.
2585 */
2586 if (mdwc->resume_pending) {
2587 dwc3_msm_resume(mdwc);
2588 mdwc->resume_pending = false;
2589 }
2590
Mayank Rana08e41922017-03-02 15:25:48 -08002591 if (atomic_read(&mdwc->pm_suspended)) {
2592 dbg_event(0xFF, "RWrk PMSus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07002593 /* let pm resume kick in resume work later */
2594 return;
Mayank Rana08e41922017-03-02 15:25:48 -08002595 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002596 dwc3_ext_event_notify(mdwc);
2597}
2598
2599static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2600{
2601 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2602 u32 irq_stat, irq_clear = 0;
2603
2604 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2605 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2606
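	/*
	 * Bookkeeping: bits handled below are cleared from irq_stat (so only
	 * truly unexpected events get logged at the end) and accumulated in
	 * irq_clear, which is written back to PWR_EVNT_IRQ_STAT_REG to
	 * acknowledge them in hardware.
	 */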
2607 /* Check for P3 events */
2608 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2609 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
 2610		/* Can't tell if we entered or exited P3, so check LINKSTATE */
2611 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2612 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2613 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2614 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2615
2616 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2617 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2618 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2619 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2620 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2621 atomic_set(&mdwc->in_p3, 0);
2622 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2623 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2624 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2625 atomic_set(&mdwc->in_p3, 1);
2626 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2627 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2628 }
2629
2630 /* Clear L2 exit */
2631 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2632 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
 2633		irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
2634 }
2635
2636 /* Handle exit from L1 events */
2637 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2638 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2639 __func__);
2640 if (usb_gadget_wakeup(&dwc->gadget))
2641 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2642 __func__);
2643 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2644 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2645 }
2646
2647 /* Unhandled events */
2648 if (irq_stat)
2649 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2650 __func__, irq_stat);
2651
2652 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2653}
2654
2655static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2656{
2657 struct dwc3_msm *mdwc = _mdwc;
2658 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2659
2660 dev_dbg(mdwc->dev, "%s\n", __func__);
2661
2662 if (atomic_read(&dwc->in_lpm))
2663 dwc3_resume_work(&mdwc->resume_work);
2664 else
2665 dwc3_pwr_event_handler(mdwc);
2666
Mayank Rana08e41922017-03-02 15:25:48 -08002667 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002668 return IRQ_HANDLED;
2669}
2670
2671static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2672{
2673 struct dwc3_msm *mdwc = data;
2674 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2675
2676 dwc->t_pwr_evt_irq = ktime_get();
2677 dev_dbg(mdwc->dev, "%s received\n", __func__);
2678 /*
 2679	 * When in Low Power Mode, can't read PWR_EVNT_IRQ_STAT_REG to ascertain
 2680	 * which interrupts have been triggered, as the clocks are disabled.
 2681	 * Resume controller by waking up pwr event irq thread. After re-enabling
2682 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2683 * all other power events.
2684 */
2685 if (atomic_read(&dwc->in_lpm)) {
2686 /* set this to call dwc3_msm_resume() */
2687 mdwc->resume_pending = true;
2688 return IRQ_WAKE_THREAD;
2689 }
2690
2691 dwc3_pwr_event_handler(mdwc);
2692 return IRQ_HANDLED;
2693}
2694
2695static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2696 unsigned long action, void *hcpu)
2697{
2698 uint32_t cpu = (uintptr_t)hcpu;
2699 struct dwc3_msm *mdwc =
2700 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2701
2702 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2703 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2704 cpu_to_affin, mdwc->irq_to_affin);
2705 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2706 }
2707
2708 return NOTIFY_OK;
2709}
2710
2711static void dwc3_otg_sm_work(struct work_struct *w);
2712
2713static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
2714{
2715 int ret;
2716
2717 mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
2718 if (IS_ERR(mdwc->dwc3_gdsc))
2719 mdwc->dwc3_gdsc = NULL;
2720
2721 mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
2722 if (IS_ERR(mdwc->xo_clk)) {
2723 dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
2724 __func__);
2725 ret = PTR_ERR(mdwc->xo_clk);
2726 return ret;
2727 }
2728 clk_set_rate(mdwc->xo_clk, 19200000);
2729
2730 mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
2731 if (IS_ERR(mdwc->iface_clk)) {
2732 dev_err(mdwc->dev, "failed to get iface_clk\n");
2733 ret = PTR_ERR(mdwc->iface_clk);
2734 return ret;
2735 }
2736
2737 /*
2738 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
 2739	 * run at 125MHz in SSUSB mode and >60MHz in HSUSB mode.
 2740	 * On newer platforms it can run at 150MHz as well.
2741 */
2742 mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
2743 if (IS_ERR(mdwc->core_clk)) {
2744 dev_err(mdwc->dev, "failed to get core_clk\n");
2745 ret = PTR_ERR(mdwc->core_clk);
2746 return ret;
2747 }
2748
Amit Nischal4d278212016-06-06 17:54:34 +05302749 mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
2750 if (IS_ERR(mdwc->core_reset)) {
2751 dev_err(mdwc->dev, "failed to get core_reset\n");
2752 return PTR_ERR(mdwc->core_reset);
2753 }
2754
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302755 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302756 (u32 *)&mdwc->core_clk_rate)) {
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302757 dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
2758 return -EINVAL;
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302759 }
2760
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302761 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302762 mdwc->core_clk_rate);
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302763 dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
2764 mdwc->core_clk_rate);
2765 ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2766 if (ret)
2767 dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002768
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002769 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
2770 (u32 *)&mdwc->core_clk_rate_hs)) {
2771 dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
2772 mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
2773 }
2774
Mayank Rana511f3b22016-08-02 12:00:11 -07002775 mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
2776 if (IS_ERR(mdwc->sleep_clk)) {
2777 dev_err(mdwc->dev, "failed to get sleep_clk\n");
2778 ret = PTR_ERR(mdwc->sleep_clk);
2779 return ret;
2780 }
2781
2782 clk_set_rate(mdwc->sleep_clk, 32000);
2783 mdwc->utmi_clk_rate = 19200000;
2784 mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
2785 if (IS_ERR(mdwc->utmi_clk)) {
2786 dev_err(mdwc->dev, "failed to get utmi_clk\n");
2787 ret = PTR_ERR(mdwc->utmi_clk);
2788 return ret;
2789 }
2790
2791 clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
2792 mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
2793 if (IS_ERR(mdwc->bus_aggr_clk))
2794 mdwc->bus_aggr_clk = NULL;
2795
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302796 mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
2797 if (IS_ERR(mdwc->noc_aggr_clk))
2798 mdwc->noc_aggr_clk = NULL;
2799
Mayank Rana511f3b22016-08-02 12:00:11 -07002800 if (of_property_match_string(mdwc->dev->of_node,
2801 "clock-names", "cfg_ahb_clk") >= 0) {
2802 mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
2803 if (IS_ERR(mdwc->cfg_ahb_clk)) {
2804 ret = PTR_ERR(mdwc->cfg_ahb_clk);
2805 mdwc->cfg_ahb_clk = NULL;
2806 if (ret != -EPROBE_DEFER)
2807 dev_err(mdwc->dev,
2808 "failed to get cfg_ahb_clk ret %d\n",
2809 ret);
2810 return ret;
2811 }
2812 }
2813
2814 return 0;
2815}
2816
2817static int dwc3_msm_id_notifier(struct notifier_block *nb,
2818 unsigned long event, void *ptr)
2819{
2820 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002821 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002822 enum dwc3_id_state id;
Mayank Rana511f3b22016-08-02 12:00:11 -07002823
2824 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2825
2826 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2827
Mayank Rana511f3b22016-08-02 12:00:11 -07002828 if (mdwc->id_state != id) {
2829 mdwc->id_state = id;
Mayank Rana08e41922017-03-02 15:25:48 -08002830 dbg_event(0xFF, "id_state", mdwc->id_state);
Mayank Rana511f3b22016-08-02 12:00:11 -07002831 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2832 }
2833
Mayank Rana511f3b22016-08-02 12:00:11 -07002834 return NOTIFY_DONE;
2835}
2836
Hemant Kumar006fae42017-07-12 18:11:25 -07002837
2838static void check_for_sdp_connection(struct work_struct *w)
2839{
Hemant Kumar006fae42017-07-12 18:11:25 -07002840 struct dwc3_msm *mdwc =
2841 container_of(w, struct dwc3_msm, sdp_check.work);
2842 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2843
2844 if (!mdwc->vbus_active)
2845 return;
2846
2847 /* floating D+/D- lines detected */
2848 if (dwc->gadget.state < USB_STATE_DEFAULT &&
2849 dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
Hemant Kumar006fae42017-07-12 18:11:25 -07002850 mdwc->vbus_active = 0;
2851 dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
2852 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2853 }
2854}
2855
Mayank Rana511f3b22016-08-02 12:00:11 -07002856static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2857 unsigned long event, void *ptr)
2858{
2859 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2860 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002861
2862 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2863
2864 if (mdwc->vbus_active == event)
2865 return NOTIFY_DONE;
2866
Mayank Rana511f3b22016-08-02 12:00:11 -07002867 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002868 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002869 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002870
Mayank Rana511f3b22016-08-02 12:00:11 -07002871 return NOTIFY_DONE;
2872}
Jack Pham4e9dff72017-04-04 18:05:53 -07002873
Mayank Rana51958172017-02-28 14:49:21 -08002874/*
Mayank Rana25d02862017-09-12 14:49:41 -07002875 * Handle EUD based soft detach/attach event
Mayank Rana51958172017-02-28 14:49:21 -08002876 *
2877 * @nb - notifier handler
2878 * @event - event information i.e. soft detach/attach event
2879 * @ptr - extcon_dev pointer
2880 *
2881 * @return int - NOTIFY_DONE always due to EUD
2882 */
2883static int dwc3_msm_eud_notifier(struct notifier_block *nb,
2884 unsigned long event, void *ptr)
2885{
2886 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, eud_event_nb);
2887 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana51958172017-02-28 14:49:21 -08002888
2889 dbg_event(0xFF, "EUD_NB", event);
2890 dev_dbg(mdwc->dev, "eud:%ld event received\n", event);
2891 if (mdwc->vbus_active == event)
2892 return NOTIFY_DONE;
2893
Mayank Rana51958172017-02-28 14:49:21 -08002894 mdwc->vbus_active = event;
2895 if (dwc->is_drd && !mdwc->in_restart)
2896 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002897
Mayank Rana51958172017-02-28 14:49:21 -08002898 return NOTIFY_DONE;
2899}
Mayank Rana511f3b22016-08-02 12:00:11 -07002900
2901static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
2902{
2903 struct device_node *node = mdwc->dev->of_node;
2904 struct extcon_dev *edev;
2905 int ret = 0;
2906
2907 if (!of_property_read_bool(node, "extcon"))
2908 return 0;
2909
Mayank Rana51958172017-02-28 14:49:21 -08002910 /* Use first phandle (mandatory) for USB vbus status notification */
Mayank Rana511f3b22016-08-02 12:00:11 -07002911 edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
2912 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
2913 return PTR_ERR(edev);
2914
2915 if (!IS_ERR(edev)) {
2916 mdwc->extcon_vbus = edev;
2917 mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
2918 ret = extcon_register_notifier(edev, EXTCON_USB,
2919 &mdwc->vbus_nb);
2920 if (ret < 0) {
2921 dev_err(mdwc->dev, "failed to register notifier for USB\n");
2922 return ret;
2923 }
2924 }
2925
Mayank Rana51958172017-02-28 14:49:21 -08002926 /* Use second phandle (optional) for USB ID status notification */
Mayank Rana511f3b22016-08-02 12:00:11 -07002927 if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
2928 edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
2929 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2930 ret = PTR_ERR(edev);
2931 goto err;
2932 }
2933 }
2934
2935 if (!IS_ERR(edev)) {
2936 mdwc->extcon_id = edev;
2937 mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
Mayank Rana54d60432017-07-18 12:10:04 -07002938 mdwc->host_restart_nb.notifier_call =
2939 dwc3_restart_usb_host_mode;
Mayank Rana511f3b22016-08-02 12:00:11 -07002940 ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
2941 &mdwc->id_nb);
2942 if (ret < 0) {
2943 dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
2944 goto err;
2945 }
Mayank Rana54d60432017-07-18 12:10:04 -07002946
2947 ret = extcon_register_blocking_notifier(edev, EXTCON_USB_HOST,
2948 &mdwc->host_restart_nb);
2949 if (ret < 0) {
2950 dev_err(mdwc->dev, "failed to register blocking notifier\n");
2951 goto err1;
2952 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002953 }
2954
Mayank Rana81bd2e52017-07-26 16:15:15 -07002955 edev = NULL;
Mayank Rana51958172017-02-28 14:49:21 -08002956 /* Use third phandle (optional) for EUD based detach/attach events */
2957 if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
2958 edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
2959 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2960 ret = PTR_ERR(edev);
Mayank Rana54d60432017-07-18 12:10:04 -07002961 goto err1;
Mayank Rana51958172017-02-28 14:49:21 -08002962 }
2963 }
2964
Mayank Rana81bd2e52017-07-26 16:15:15 -07002965 if (!IS_ERR_OR_NULL(edev)) {
Mayank Rana51958172017-02-28 14:49:21 -08002966 mdwc->extcon_eud = edev;
2967 mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
2968 ret = extcon_register_notifier(edev, EXTCON_USB,
2969 &mdwc->eud_event_nb);
2970 if (ret < 0) {
2971 dev_err(mdwc->dev, "failed to register notifier for EUD-USB\n");
Mayank Rana54d60432017-07-18 12:10:04 -07002972 goto err2;
Mayank Rana51958172017-02-28 14:49:21 -08002973 }
2974 }
2975
Mayank Rana511f3b22016-08-02 12:00:11 -07002976 return 0;
Mayank Rana54d60432017-07-18 12:10:04 -07002977err2:
2978 if (mdwc->extcon_id)
2979 extcon_unregister_blocking_notifier(mdwc->extcon_id,
2980 EXTCON_USB_HOST, &mdwc->host_restart_nb);
Mayank Rana51958172017-02-28 14:49:21 -08002981err1:
2982 if (mdwc->extcon_id)
2983 extcon_unregister_notifier(mdwc->extcon_id, EXTCON_USB_HOST,
2984 &mdwc->id_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07002985err:
2986 if (mdwc->extcon_vbus)
2987 extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
2988 &mdwc->vbus_nb);
2989 return ret;
2990}
2991
Mayank Rana00d6f722017-09-18 17:22:03 -07002992#define SMMU_BASE 0x60000000 /* Device address range base */
2993#define SMMU_SIZE 0x90000000 /* Device address range size */
Jack Phambbe27962017-03-23 18:42:26 -07002994
2995static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
2996{
2997 struct device_node *node = mdwc->dev->of_node;
Jack Pham283cece2017-04-05 09:58:17 -07002998 int atomic_ctx = 1, s1_bypass;
Jack Phambbe27962017-03-23 18:42:26 -07002999 int ret;
3000
3001 if (!of_property_read_bool(node, "iommus"))
3002 return 0;
3003
3004 mdwc->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
3005 SMMU_BASE, SMMU_SIZE);
3006 if (IS_ERR_OR_NULL(mdwc->iommu_map)) {
3007 ret = PTR_ERR(mdwc->iommu_map) ?: -ENODEV;
3008 dev_err(mdwc->dev, "Failed to create IOMMU mapping (%d)\n",
3009 ret);
3010 return ret;
3011 }
3012 dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
3013
3014 ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
3015 &atomic_ctx);
3016 if (ret) {
3017 dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
3018 ret);
Jack Pham9faa51df2017-04-03 18:13:40 -07003019 goto release_mapping;
Jack Phambbe27962017-03-23 18:42:26 -07003020 }
3021
Jack Pham283cece2017-04-05 09:58:17 -07003022 s1_bypass = of_property_read_bool(node, "qcom,smmu-s1-bypass");
3023 ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
3024 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
3025 if (ret) {
3026 dev_err(mdwc->dev, "IOMMU set s1 bypass (%d) failed (%d)\n",
3027 s1_bypass, ret);
3028 goto release_mapping;
3029 }
3030
Jack Pham9faa51df2017-04-03 18:13:40 -07003031 ret = arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map);
3032 if (ret) {
3033 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n", ret);
3034 goto release_mapping;
3035 }
3036 dev_dbg(mdwc->dev, "attached to IOMMU\n");
3037
Jack Phambbe27962017-03-23 18:42:26 -07003038 return 0;
Jack Pham9faa51df2017-04-03 18:13:40 -07003039
3040release_mapping:
3041 arm_iommu_release_mapping(mdwc->iommu_map);
3042 mdwc->iommu_map = NULL;
3043 return ret;
Jack Phambbe27962017-03-23 18:42:26 -07003044}
3045
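/*
 * "mode" sysfs attribute: reads back the current role and, on write, forces
 * peripheral/host/none by faking the VBUS and ID inputs before kicking the
 * OTG state machine. Usage sketch (path is illustrative only):
 *   echo host > /sys/devices/platform/.../dwc3-msm/mode
 */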
Mayank Rana511f3b22016-08-02 12:00:11 -07003046static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
3047 char *buf)
3048{
3049 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3050
3051 if (mdwc->vbus_active)
3052 return snprintf(buf, PAGE_SIZE, "peripheral\n");
3053 if (mdwc->id_state == DWC3_ID_GROUND)
3054 return snprintf(buf, PAGE_SIZE, "host\n");
3055
3056 return snprintf(buf, PAGE_SIZE, "none\n");
3057}
3058
3059static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
3060 const char *buf, size_t count)
3061{
3062 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3063
3064 if (sysfs_streq(buf, "peripheral")) {
3065 mdwc->vbus_active = true;
3066 mdwc->id_state = DWC3_ID_FLOAT;
3067 } else if (sysfs_streq(buf, "host")) {
3068 mdwc->vbus_active = false;
3069 mdwc->id_state = DWC3_ID_GROUND;
3070 } else {
3071 mdwc->vbus_active = false;
3072 mdwc->id_state = DWC3_ID_FLOAT;
3073 }
3074
3075 dwc3_ext_event_notify(mdwc);
3076
3077 return count;
3078}
3079
3080static DEVICE_ATTR_RW(mode);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303081static void msm_dwc3_perf_vote_work(struct work_struct *w);
Mayank Rana511f3b22016-08-02 12:00:11 -07003082
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003083static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
3084 char *buf)
3085{
3086 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3087 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3088
3089 return snprintf(buf, PAGE_SIZE, "%s\n",
3090 usb_speed_string(dwc->max_hw_supp_speed));
3091}
3092
3093static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
3094 const char *buf, size_t count)
3095{
3096 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3097 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3098 enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
3099
3100 if (sysfs_streq(buf, "high"))
3101 req_speed = USB_SPEED_HIGH;
3102 else if (sysfs_streq(buf, "super"))
3103 req_speed = USB_SPEED_SUPER;
3104
3105 if (req_speed != USB_SPEED_UNKNOWN &&
3106 req_speed != dwc->max_hw_supp_speed) {
3107 dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
3108 schedule_work(&mdwc->restart_usb_work);
3109 }
3110
3111 return count;
3112}
3113static DEVICE_ATTR_RW(speed);
3114
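/*
 * dwc3_msm_probe() pulls its configuration from the device tree. A minimal
 * illustrative node sketch follows (property names match what is parsed
 * below; addresses, sizes and values are made-up examples, and the real
 * bindings carry additional clock/regulator/PHY properties):
 *
 *	usb@a800000 {
 *		compatible = "qcom,dwc-usb3-msm";
 *		reg = <0xa800000 0xf8800>;
 *		reg-names = "core_base";
 *		interrupt-names = "pwr_event_irq";	// the only mandatory IRQ
 *		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
 *		qcom,num-gsi-evt-buffs = <0x3>;
 *		qcom,pm-qos-latency = <62>;
 *		qcom,use-pdc-interrupts;
 *
 *		dwc3@a800000 { ... };	// dwc3 core, assumed first DT child
 *	};
 */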
Mayank Rana511f3b22016-08-02 12:00:11 -07003115static int dwc3_msm_probe(struct platform_device *pdev)
3116{
3117 struct device_node *node = pdev->dev.of_node, *dwc3_node;
3118 struct device *dev = &pdev->dev;
Hemant Kumar8220a982017-01-19 18:11:34 -08003119 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003120 struct dwc3_msm *mdwc;
3121 struct dwc3 *dwc;
3122 struct resource *res;
3123 void __iomem *tcsr;
3124 bool host_mode;
Mayank Ranad339abe2017-05-31 09:19:49 -07003125 int ret = 0, i;
Mayank Rana511f3b22016-08-02 12:00:11 -07003126 int ext_hub_reset_gpio;
3127 u32 val;
Mayank Ranad339abe2017-05-31 09:19:49 -07003128 unsigned long irq_type;
Mayank Rana511f3b22016-08-02 12:00:11 -07003129
3130 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
3131 if (!mdwc)
3132 return -ENOMEM;
3133
3134 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
3135 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
3136 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
3137 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
3138 return -EOPNOTSUPP;
3139 }
3140 }
3141
3142 platform_set_drvdata(pdev, mdwc);
3143 mdwc->dev = &pdev->dev;
3144
3145 INIT_LIST_HEAD(&mdwc->req_complete_list);
3146 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
3147 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07003148 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003149 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303150 INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
Hemant Kumar006fae42017-07-12 18:11:25 -07003151 INIT_DELAYED_WORK(&mdwc->sdp_check, check_for_sdp_connection);
Mayank Rana511f3b22016-08-02 12:00:11 -07003152
3153 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
3154 if (!mdwc->dwc3_wq) {
3155 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
3156 return -ENOMEM;
3157 }
3158
3159 /* Get all clks and gdsc reference */
3160 ret = dwc3_msm_get_clk_gdsc(mdwc);
3161 if (ret) {
3162 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
Ziqi Chen0ea81162017-08-04 18:17:55 +08003163 goto err;
Mayank Rana511f3b22016-08-02 12:00:11 -07003164 }
3165
3166 mdwc->id_state = DWC3_ID_FLOAT;
3167 set_bit(ID, &mdwc->inputs);
3168
3169 mdwc->charging_disabled = of_property_read_bool(node,
3170 "qcom,charging-disabled");
3171
3172 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
3173 &mdwc->lpm_to_suspend_delay);
3174 if (ret) {
3175 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
3176 mdwc->lpm_to_suspend_delay = 0;
3177 }
3178
Mayank Ranad339abe2017-05-31 09:19:49 -07003179 memcpy(mdwc->wakeup_irq, usb_irq_info, sizeof(usb_irq_info));
3180 for (i = 0; i < USB_MAX_IRQ; i++) {
3181 irq_type = IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME |
3182 IRQF_ONESHOT;
3183 mdwc->wakeup_irq[i].irq = platform_get_irq_byname(pdev,
3184 mdwc->wakeup_irq[i].name);
3185 if (mdwc->wakeup_irq[i].irq < 0) {
 3186 /* pwr_event_irq is the only mandatory IRQ */
3187 if (!strcmp(mdwc->wakeup_irq[i].name,
3188 "pwr_event_irq")) {
3189 dev_err(&pdev->dev, "get_irq for %s failed\n\n",
3190 mdwc->wakeup_irq[i].name);
3191 ret = -EINVAL;
3192 goto err;
3193 }
3194 mdwc->wakeup_irq[i].irq = 0;
3195 } else {
3196 irq_set_status_flags(mdwc->wakeup_irq[i].irq,
3197 IRQ_NOAUTOEN);
3198 /* ss_phy_irq is level trigger interrupt */
3199 if (!strcmp(mdwc->wakeup_irq[i].name, "ss_phy_irq"))
3200 irq_type = IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
3201 IRQ_TYPE_LEVEL_HIGH | IRQF_EARLY_RESUME;
Mayank Rana511f3b22016-08-02 12:00:11 -07003202
Mayank Ranad339abe2017-05-31 09:19:49 -07003203 ret = devm_request_threaded_irq(&pdev->dev,
3204 mdwc->wakeup_irq[i].irq,
Mayank Rana511f3b22016-08-02 12:00:11 -07003205 msm_dwc3_pwr_irq,
3206 msm_dwc3_pwr_irq_thread,
Mayank Ranad339abe2017-05-31 09:19:49 -07003207 irq_type,
3208 mdwc->wakeup_irq[i].name, mdwc);
3209 if (ret) {
3210 dev_err(&pdev->dev, "irq req %s failed: %d\n\n",
3211 mdwc->wakeup_irq[i].name, ret);
3212 goto err;
3213 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003214 }
3215 }
3216
3217 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
3218 if (!res) {
3219 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
3220 } else {
3221 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
3222 resource_size(res));
3223 if (IS_ERR_OR_NULL(tcsr)) {
3224 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
3225 } else {
3226 /* Enable USB3 on the primary USB port. */
3227 writel_relaxed(0x1, tcsr);
3228 /*
3229 * Ensure that TCSR write is completed before
 3230 * USB register initialization.
3231 */
3232 mb();
3233 }
3234 }
3235
3236 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
3237 if (!res) {
3238 dev_err(&pdev->dev, "missing memory base resource\n");
3239 ret = -ENODEV;
3240 goto err;
3241 }
3242
3243 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
3244 resource_size(res));
3245 if (!mdwc->base) {
3246 dev_err(&pdev->dev, "ioremap failed\n");
3247 ret = -ENODEV;
3248 goto err;
3249 }
3250
3251 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3252 "ahb2phy_base");
3253 if (res) {
3254 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
3255 res->start, resource_size(res));
3256 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
3257 dev_err(dev, "couldn't find ahb2phy_base addr.\n");
3258 mdwc->ahb2phy_base = NULL;
3259 } else {
3260 /*
 3261 * On some targets cfg_ahb_clk depends upon the usb gdsc
 3262 * regulator. If cfg_ahb_clk is enabled without first
 3263 * turning on the usb gdsc regulator, the clock is stuck off.
3264 */
3265 dwc3_msm_config_gdsc(mdwc, 1);
3266 clk_prepare_enable(mdwc->cfg_ahb_clk);
 3267 /* Configure AHB2PHY for one wait state read/write */
3268 val = readl_relaxed(mdwc->ahb2phy_base +
3269 PERIPH_SS_AHB2PHY_TOP_CFG);
3270 if (val != ONE_READ_WRITE_WAIT) {
3271 writel_relaxed(ONE_READ_WRITE_WAIT,
3272 mdwc->ahb2phy_base +
3273 PERIPH_SS_AHB2PHY_TOP_CFG);
3274 /* complete above write before using USB PHY */
3275 mb();
3276 }
3277 clk_disable_unprepare(mdwc->cfg_ahb_clk);
3278 dwc3_msm_config_gdsc(mdwc, 0);
3279 }
3280 }
3281
3282 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
3283 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
3284 if (IS_ERR(mdwc->dbm)) {
3285 dev_err(&pdev->dev, "unable to get dbm device\n");
3286 ret = -EPROBE_DEFER;
3287 goto err;
3288 }
3289 /*
 3290 * The power event IRQ is required when the DBM signals exit
 3291 * from L1 by interrupt
3292 */
3293 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
Mayank Ranad339abe2017-05-31 09:19:49 -07003294 if (!mdwc->wakeup_irq[PWR_EVNT_IRQ].irq) {
Mayank Rana511f3b22016-08-02 12:00:11 -07003295 dev_err(&pdev->dev,
3296 "need pwr_event_irq exiting L1\n");
3297 ret = -EINVAL;
3298 goto err;
3299 }
3300 }
3301 }
3302
3303 ext_hub_reset_gpio = of_get_named_gpio(node,
3304 "qcom,ext-hub-reset-gpio", 0);
3305
3306 if (gpio_is_valid(ext_hub_reset_gpio)
3307 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
3308 "qcom,ext-hub-reset-gpio"))) {
3309 /* reset external hub */
3310 gpio_direction_output(ext_hub_reset_gpio, 1);
3311 /*
 3312 * Hub reset should be asserted for a minimum of 5 microseconds
 3313 * before deasserting.
3314 */
3315 usleep_range(5, 1000);
3316 gpio_direction_output(ext_hub_reset_gpio, 0);
3317 }
3318
3319 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
3320 &mdwc->tx_fifo_size))
3321 dev_err(&pdev->dev,
3322 "unable to read platform data tx fifo size\n");
3323
3324 mdwc->disable_host_mode_pm = of_property_read_bool(node,
3325 "qcom,disable-host-mode-pm");
Mayank Ranad339abe2017-05-31 09:19:49 -07003326 mdwc->use_pdc_interrupts = of_property_read_bool(node,
3327 "qcom,use-pdc-interrupts");
Mayank Rana511f3b22016-08-02 12:00:11 -07003328 dwc3_set_notifier(&dwc3_msm_notify_event);
3329
Jack Phambbe27962017-03-23 18:42:26 -07003330 ret = dwc3_msm_init_iommu(mdwc);
3331 if (ret)
3332 goto err;
3333
Mayank Rana511f3b22016-08-02 12:00:11 -07003334 /* Assumes dwc3 is the first DT child of dwc3-msm */
3335 dwc3_node = of_get_next_available_child(node, NULL);
3336 if (!dwc3_node) {
3337 dev_err(&pdev->dev, "failed to find dwc3 child\n");
3338 ret = -ENODEV;
Jack Phambbe27962017-03-23 18:42:26 -07003339 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003340 }
3341
3342 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3343 if (ret) {
3344 dev_err(&pdev->dev,
3345 "failed to add create dwc3 core\n");
3346 of_node_put(dwc3_node);
Jack Phambbe27962017-03-23 18:42:26 -07003347 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003348 }
3349
3350 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
3351 of_node_put(dwc3_node);
3352 if (!mdwc->dwc3) {
3353 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
3354 goto put_dwc3;
3355 }
3356
3357 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3358 "usb-phy", 0);
3359 if (IS_ERR(mdwc->hs_phy)) {
3360 dev_err(&pdev->dev, "unable to get hsphy device\n");
3361 ret = PTR_ERR(mdwc->hs_phy);
3362 goto put_dwc3;
3363 }
3364 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3365 "usb-phy", 1);
3366 if (IS_ERR(mdwc->ss_phy)) {
3367 dev_err(&pdev->dev, "unable to get ssphy device\n");
3368 ret = PTR_ERR(mdwc->ss_phy);
3369 goto put_dwc3;
3370 }
3371
3372 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3373 if (mdwc->bus_scale_table) {
3374 mdwc->bus_perf_client =
3375 msm_bus_scale_register_client(mdwc->bus_scale_table);
3376 }
3377
3378 dwc = platform_get_drvdata(mdwc->dwc3);
3379 if (!dwc) {
3380 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
3381 goto put_dwc3;
3382 }
3383
3384 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
3385 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
3386
3387 if (cpu_to_affin)
3388 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3389
Mayank Ranaf4918d32016-12-15 13:35:55 -08003390 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
3391 &mdwc->num_gsi_event_buffers);
3392
Jack Pham9faa51df2017-04-03 18:13:40 -07003393 /* IOMMU will be reattached upon each resume/connect */
3394 if (mdwc->iommu_map)
3395 arm_iommu_detach_device(mdwc->dev);
3396
Mayank Rana511f3b22016-08-02 12:00:11 -07003397 /*
3398 * Clocks and regulators will not be turned on until the first time
3399 * runtime PM resume is called. This is to allow for booting up with
3400 * charger already connected so as not to disturb PHY line states.
3401 */
3402 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
3403 atomic_set(&dwc->in_lpm, 1);
Mayank Rana511f3b22016-08-02 12:00:11 -07003404 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
3405 pm_runtime_use_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003406 device_init_wakeup(mdwc->dev, 1);
3407
3408 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
3409 pm_runtime_get_noresume(mdwc->dev);
3410
3411 ret = dwc3_msm_extcon_register(mdwc);
3412 if (ret)
3413 goto put_dwc3;
3414
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303415 ret = of_property_read_u32(node, "qcom,pm-qos-latency",
3416 &mdwc->pm_qos_latency);
3417 if (ret) {
3418 dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
3419 mdwc->pm_qos_latency = 0;
3420 }
3421
Hemant Kumar8220a982017-01-19 18:11:34 -08003422 mdwc->usb_psy = power_supply_get_by_name("usb");
3423 if (!mdwc->usb_psy) {
3424 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3425 pval.intval = -EINVAL;
3426 } else {
3427 power_supply_get_property(mdwc->usb_psy,
3428 POWER_SUPPLY_PROP_PRESENT, &pval);
3429 }
3430
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05303431 mutex_init(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07003432 /* Update initial VBUS/ID state from extcon */
Jack Pham4e9dff72017-04-04 18:05:53 -07003433 if (mdwc->extcon_vbus && extcon_get_state(mdwc->extcon_vbus,
Mayank Rana511f3b22016-08-02 12:00:11 -07003434 EXTCON_USB))
3435 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
Jack Pham4e9dff72017-04-04 18:05:53 -07003436 else if (mdwc->extcon_id && extcon_get_state(mdwc->extcon_id,
Mayank Rana511f3b22016-08-02 12:00:11 -07003437 EXTCON_USB_HOST))
3438 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
Hemant Kumar8220a982017-01-19 18:11:34 -08003439 else if (!pval.intval) {
3440 /* USB cable is not connected */
3441 schedule_delayed_work(&mdwc->sm_work, 0);
3442 } else {
3443 if (pval.intval > 0)
3444 dev_info(mdwc->dev, "charger detection in progress\n");
3445 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003446
3447 device_create_file(&pdev->dev, &dev_attr_mode);
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003448 device_create_file(&pdev->dev, &dev_attr_speed);
Mayank Rana511f3b22016-08-02 12:00:11 -07003449
Mayank Rana511f3b22016-08-02 12:00:11 -07003450 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3451 if (!dwc->is_drd && host_mode) {
3452 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3453 mdwc->id_state = DWC3_ID_GROUND;
3454 dwc3_ext_event_notify(mdwc);
3455 }
3456
3457 return 0;
3458
3459put_dwc3:
Mayank Rana511f3b22016-08-02 12:00:11 -07003460 if (mdwc->bus_perf_client)
3461 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
Ziqi Chen0ea81162017-08-04 18:17:55 +08003462
Jack Phambbe27962017-03-23 18:42:26 -07003463uninit_iommu:
Jack Pham9faa51df2017-04-03 18:13:40 -07003464 if (mdwc->iommu_map) {
3465 arm_iommu_detach_device(mdwc->dev);
Jack Phambbe27962017-03-23 18:42:26 -07003466 arm_iommu_release_mapping(mdwc->iommu_map);
Jack Pham9faa51df2017-04-03 18:13:40 -07003467 }
Ziqi Chen0ea81162017-08-04 18:17:55 +08003468 of_platform_depopulate(&pdev->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003469err:
Ziqi Chen0ea81162017-08-04 18:17:55 +08003470 destroy_workqueue(mdwc->dwc3_wq);
Mayank Rana511f3b22016-08-02 12:00:11 -07003471 return ret;
3472}
3473
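/*
 * Tear-down mirror of probe: bring the controller out of low power (the
 * clocks are re-enabled by hand if pm_runtime_get_sync() fails during
 * system suspend), stop the state machine and perf vote work, drop runtime
 * PM, bus-scaling and regulator votes, disable the wakeup IRQs and release
 * clocks, GDSC and the IOMMU mapping.
 */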
Mayank Rana511f3b22016-08-02 12:00:11 -07003474static int dwc3_msm_remove(struct platform_device *pdev)
3475{
3476 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
Mayank Rana08e41922017-03-02 15:25:48 -08003477 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003478 int ret_pm;
3479
 3480 device_remove_file(&pdev->dev, &dev_attr_mode);
 device_remove_file(&pdev->dev, &dev_attr_speed);
3481
3482 if (cpu_to_affin)
3483 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3484
3485 /*
3486 * In case of system suspend, pm_runtime_get_sync fails.
3487 * Hence turn ON the clocks manually.
3488 */
3489 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003490 dbg_event(0xFF, "Remov gsyn", ret_pm);
Mayank Rana511f3b22016-08-02 12:00:11 -07003491 if (ret_pm < 0) {
3492 dev_err(mdwc->dev,
3493 "pm_runtime_get_sync failed with %d\n", ret_pm);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05303494 if (mdwc->noc_aggr_clk)
3495 clk_prepare_enable(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07003496 clk_prepare_enable(mdwc->utmi_clk);
3497 clk_prepare_enable(mdwc->core_clk);
3498 clk_prepare_enable(mdwc->iface_clk);
3499 clk_prepare_enable(mdwc->sleep_clk);
3500 if (mdwc->bus_aggr_clk)
3501 clk_prepare_enable(mdwc->bus_aggr_clk);
3502 clk_prepare_enable(mdwc->xo_clk);
3503 }
3504
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303505 cancel_delayed_work_sync(&mdwc->perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003506 cancel_delayed_work_sync(&mdwc->sm_work);
3507
3508 if (mdwc->hs_phy)
3509 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
Ziqi Chen0ea81162017-08-04 18:17:55 +08003510 of_platform_depopulate(&pdev->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003511
Mayank Rana08e41922017-03-02 15:25:48 -08003512 dbg_event(0xFF, "Remov put", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003513 pm_runtime_disable(mdwc->dev);
3514 pm_runtime_barrier(mdwc->dev);
3515 pm_runtime_put_sync(mdwc->dev);
3516 pm_runtime_set_suspended(mdwc->dev);
3517 device_wakeup_disable(mdwc->dev);
3518
3519 if (mdwc->bus_perf_client)
3520 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3521
3522 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
3523 regulator_disable(mdwc->vbus_reg);
3524
Mayank Ranad339abe2017-05-31 09:19:49 -07003525 if (mdwc->wakeup_irq[HS_PHY_IRQ].irq)
3526 disable_irq(mdwc->wakeup_irq[HS_PHY_IRQ].irq);
3527 if (mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq)
3528 disable_irq(mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq);
3529 if (mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq)
3530 disable_irq(mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq);
3531 if (mdwc->wakeup_irq[SS_PHY_IRQ].irq)
3532 disable_irq(mdwc->wakeup_irq[SS_PHY_IRQ].irq);
3533 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07003534
3535 clk_disable_unprepare(mdwc->utmi_clk);
3536 clk_set_rate(mdwc->core_clk, 19200000);
3537 clk_disable_unprepare(mdwc->core_clk);
3538 clk_disable_unprepare(mdwc->iface_clk);
3539 clk_disable_unprepare(mdwc->sleep_clk);
3540 clk_disable_unprepare(mdwc->xo_clk);
3541 clk_put(mdwc->xo_clk);
3542
3543 dwc3_msm_config_gdsc(mdwc, 0);
3544
Jack Phambbe27962017-03-23 18:42:26 -07003545 if (mdwc->iommu_map) {
3546 if (!atomic_read(&dwc->in_lpm))
3547 arm_iommu_detach_device(mdwc->dev);
3548 arm_iommu_release_mapping(mdwc->iommu_map);
3549 }
3550
Mayank Rana511f3b22016-08-02 12:00:11 -07003551 return 0;
3552}
3553
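/*
 * USB core notifier used while in host mode. For devices attached directly
 * to the root hub it lowers core_clk to the HS rate when no SuperSpeed
 * root-hub port is in use, restores the default rate on removal, and
 * reports the active configuration's bMaxPower to the PMIC
 * (POWER_SUPPLY_PROP_BOOST_CURRENT) so the VBUS boost can be optimized.
 */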
Jack Pham4d4e9342016-12-07 19:25:02 -08003554static int dwc3_msm_host_notifier(struct notifier_block *nb,
3555 unsigned long event, void *ptr)
3556{
3557 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
3558 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3559 struct usb_device *udev = ptr;
3560 union power_supply_propval pval;
3561 unsigned int max_power;
3562
3563 if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
3564 return NOTIFY_DONE;
3565
3566 if (!mdwc->usb_psy) {
3567 mdwc->usb_psy = power_supply_get_by_name("usb");
3568 if (!mdwc->usb_psy)
3569 return NOTIFY_DONE;
3570 }
3571
3572 /*
3573 * For direct-attach devices, new udev is direct child of root hub
3574 * i.e. dwc -> xhci -> root_hub -> udev
3575 * root_hub's udev->parent==NULL, so traverse struct device hierarchy
3576 */
3577 if (udev->parent && !udev->parent->parent &&
3578 udev->dev.parent->parent == &dwc->xhci->dev) {
3579 if (event == USB_DEVICE_ADD && udev->actconfig) {
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003580 if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
3581 /*
3582 * Core clock rate can be reduced only if root
3583 * hub SS port is not enabled/connected.
3584 */
3585 clk_set_rate(mdwc->core_clk,
3586 mdwc->core_clk_rate_hs);
3587 dev_dbg(mdwc->dev,
3588 "set hs core clk rate %ld\n",
3589 mdwc->core_clk_rate_hs);
3590 mdwc->max_rh_port_speed = USB_SPEED_HIGH;
3591 } else {
3592 mdwc->max_rh_port_speed = USB_SPEED_SUPER;
3593 }
3594
Jack Pham4d4e9342016-12-07 19:25:02 -08003595 if (udev->speed >= USB_SPEED_SUPER)
3596 max_power = udev->actconfig->desc.bMaxPower * 8;
3597 else
3598 max_power = udev->actconfig->desc.bMaxPower * 2;
3599 dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
3600 dev_name(&udev->dev), max_power);
3601
3602 /* inform PMIC of max power so it can optimize boost */
3603 pval.intval = max_power * 1000;
3604 power_supply_set_property(mdwc->usb_psy,
3605 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
3606 } else {
3607 pval.intval = 0;
3608 power_supply_set_property(mdwc->usb_psy,
3609 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
Hemant Kumar6f504dc2017-02-07 14:13:58 -08003610
3611 /* set rate back to default core clk rate */
3612 clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
3613 dev_dbg(mdwc->dev, "set core clk rate %ld\n",
3614 mdwc->core_clk_rate);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003615 mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
Jack Pham4d4e9342016-12-07 19:25:02 -08003616 }
3617 }
3618
3619 return NOTIFY_DONE;
3620}
3621
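/*
 * Apply or drop the CPU DMA latency vote: in perf mode the PM QoS request
 * is set to the DT-provided qcom,pm-qos-latency value, otherwise it falls
 * back to PM_QOS_DEFAULT_VALUE. A latency of zero disables the voting
 * entirely.
 */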
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303622static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
3623{
3624 static bool curr_perf_mode;
3625 int latency = mdwc->pm_qos_latency;
3626
3627 if ((curr_perf_mode == perf_mode) || !latency)
3628 return;
3629
3630 if (perf_mode)
3631 pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
3632 else
3633 pm_qos_update_request(&mdwc->pm_qos_req_dma,
3634 PM_QOS_DEFAULT_VALUE);
3635
3636 curr_perf_mode = perf_mode;
3637 pr_debug("%s: latency updated to: %d\n", __func__,
3638 perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
3639}
3640
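/*
 * Periodic sampler (every PM_QOS_SAMPLE_SEC seconds): vote for low latency
 * if at least PM_QOS_THRESHOLD controller interrupts were seen since the
 * previous sample, otherwise release the vote, then re-arm itself.
 */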
3641static void msm_dwc3_perf_vote_work(struct work_struct *w)
3642{
3643 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
3644 perf_vote_work.work);
3645 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3646 static unsigned long last_irq_cnt;
3647 bool in_perf_mode = false;
3648
3649 if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD)
3650 in_perf_mode = true;
3651
3652 pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
3653 __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt));
3654
3655 last_irq_cnt = dwc->irq_cnt;
3656 msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
3657 schedule_delayed_work(&mdwc->perf_vote_work,
3658 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
3659}
3660
Mayank Rana511f3b22016-08-02 12:00:11 -07003661#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
3662
3663/**
 3664 * dwc3_otg_start_host - helper function for starting/stopping the host
3665 * controller driver.
3666 *
3667 * @mdwc: Pointer to the dwc3_msm structure.
3668 * @on: start / stop the host controller driver.
3669 *
3670 * Returns 0 on success otherwise negative errno.
3671 */
3672static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
3673{
3674 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3675 int ret = 0;
3676
Mayank Rana511f3b22016-08-02 12:00:11 -07003677 /*
3678 * The vbus_reg pointer could have multiple values
3679 * NULL: regulator_get() hasn't been called, or was previously deferred
3680 * IS_ERR: regulator could not be obtained, so skip using it
3681 * Valid pointer otherwise
3682 */
3683 if (!mdwc->vbus_reg) {
3684 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
3685 "vbus_dwc3");
3686 if (IS_ERR(mdwc->vbus_reg) &&
3687 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
3688 /* regulators may not be ready, so retry again later */
3689 mdwc->vbus_reg = NULL;
3690 return -EPROBE_DEFER;
3691 }
3692 }
3693
3694 if (on) {
3695 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
3696
Mayank Rana511f3b22016-08-02 12:00:11 -07003697 mdwc->hs_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003698 if (dwc->maximum_speed == USB_SPEED_SUPER) {
Hemant Kumarde1df692016-04-26 19:36:48 -07003699 mdwc->ss_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003700 usb_phy_notify_connect(mdwc->ss_phy,
3701 USB_SPEED_SUPER);
3702 }
Hemant Kumarde1df692016-04-26 19:36:48 -07003703
Mayank Rana0d5efd72017-06-08 10:06:00 -07003704 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
Hemant Kumarde1df692016-04-26 19:36:48 -07003705 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003706 dbg_event(0xFF, "StrtHost gync",
3707 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003708 if (!IS_ERR(mdwc->vbus_reg))
3709 ret = regulator_enable(mdwc->vbus_reg);
3710 if (ret) {
3711 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
3712 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3713 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3714 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003715 dbg_event(0xFF, "vregerr psync",
3716 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003717 return ret;
3718 }
3719
3720 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
3721
Jack Pham4d4e9342016-12-07 19:25:02 -08003722 mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
3723 usb_register_notify(&mdwc->host_nb);
3724
Manu Gautam976fdfc2016-08-18 09:27:35 +05303725 mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
3726 usb_register_atomic_notify(&mdwc->usbdev_nb);
Mayank Ranaa75caa52017-10-10 11:45:13 -07003727 ret = dwc3_host_init(dwc);
Mayank Rana511f3b22016-08-02 12:00:11 -07003728 if (ret) {
3729 dev_err(mdwc->dev,
3730 "%s: failed to add XHCI pdev ret=%d\n",
3731 __func__, ret);
3732 if (!IS_ERR(mdwc->vbus_reg))
3733 regulator_disable(mdwc->vbus_reg);
3734 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3735 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3736 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003737 dbg_event(0xFF, "pdeverr psync",
3738 atomic_read(&mdwc->dev->power.usage_count));
Jack Pham4d4e9342016-12-07 19:25:02 -08003739 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003740 return ret;
3741 }
3742
3743 /*
 3744 * In some cases the USB PHY is observed not to enter suspend
 3745 * with host mode suspend functionality. Hence disable
3746 * XHCI's runtime PM here if disable_host_mode_pm is set.
3747 */
3748 if (mdwc->disable_host_mode_pm)
3749 pm_runtime_disable(&dwc->xhci->dev);
3750
3751 mdwc->in_host_mode = true;
3752 dwc3_usb3_phy_suspend(dwc, true);
3753
3754 /* xHCI should have incremented child count as necessary */
Mayank Rana08e41922017-03-02 15:25:48 -08003755 dbg_event(0xFF, "StrtHost psync",
3756 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003757 pm_runtime_mark_last_busy(mdwc->dev);
3758 pm_runtime_put_sync_autosuspend(mdwc->dev);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303759#ifdef CONFIG_SMP
3760 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3761 mdwc->pm_qos_req_dma.irq = dwc->irq;
3762#endif
3763 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3764 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3765 /* start in perf mode for better performance initially */
3766 msm_dwc3_perf_vote_update(mdwc, true);
3767 schedule_delayed_work(&mdwc->perf_vote_work,
3768 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003769 } else {
3770 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3771
Manu Gautam976fdfc2016-08-18 09:27:35 +05303772 usb_unregister_atomic_notify(&mdwc->usbdev_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003773 if (!IS_ERR(mdwc->vbus_reg))
3774 ret = regulator_disable(mdwc->vbus_reg);
3775 if (ret) {
3776 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3777 return ret;
3778 }
3779
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303780 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3781 msm_dwc3_perf_vote_update(mdwc, false);
3782 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3783
Mayank Rana511f3b22016-08-02 12:00:11 -07003784 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003785 dbg_event(0xFF, "StopHost gsync",
3786 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003787 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
Mayank Rana0d5efd72017-06-08 10:06:00 -07003788 if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
3789 usb_phy_notify_disconnect(mdwc->ss_phy,
3790 USB_SPEED_SUPER);
3791 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3792 }
3793
Mayank Rana511f3b22016-08-02 12:00:11 -07003794 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
Mayank Ranaa75caa52017-10-10 11:45:13 -07003795 dwc3_host_exit(dwc);
Jack Pham4d4e9342016-12-07 19:25:02 -08003796 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003797
Mayank Rana511f3b22016-08-02 12:00:11 -07003798 dwc3_usb3_phy_suspend(dwc, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07003799 mdwc->in_host_mode = false;
3800
Mayank Ranaa1d094c2017-11-03 10:40:10 -07003801 pm_runtime_put_sync_suspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003802 dbg_event(0xFF, "StopHost psync",
3803 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003804 }
3805
3806 return 0;
3807}
3808
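/*
 * Force the controller's VBUS-valid/session-valid inputs through the
 * QSCRATCH HS_PHY_CTRL (and, when SuperSpeed is supported, SS_PHY_CTRL)
 * registers so they track the software-detected cable state.
 */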
3809static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3810{
3811 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3812
3813 /* Update OTG VBUS Valid from HSPHY to controller */
3814 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3815 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3816 UTMI_OTG_VBUS_VALID,
3817 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3818
3819 /* Update only if Super Speed is supported */
3820 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3821 /* Update VBUS Valid from SSPHY to controller */
3822 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3823 LANE0_PWR_PRESENT,
3824 vbus_present ? LANE0_PWR_PRESENT : 0);
3825 }
3826}
3827
3828/**
3829 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3830 *
3831 * @mdwc: Pointer to the dwc3_msm structure.
3832 * @on: Turn ON/OFF the gadget.
3833 *
3834 * Returns 0 on success otherwise negative errno.
3835 */
3836static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3837{
3838 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3839
3840 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003841 dbg_event(0xFF, "StrtGdgt gsync",
3842 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003843
3844 if (on) {
3845 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3846 __func__, dwc->gadget.name);
3847
3848 dwc3_override_vbus_status(mdwc, true);
3849 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3850 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3851
3852 /*
3853 * Core reset is not required during start peripheral. Only
3854 * DBM reset is required, hence perform only DBM reset here.
3855 */
3856 dwc3_msm_block_reset(mdwc, false);
3857
3858 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
3859 usb_gadget_vbus_connect(&dwc->gadget);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303860#ifdef CONFIG_SMP
3861 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3862 mdwc->pm_qos_req_dma.irq = dwc->irq;
3863#endif
3864 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3865 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3866 /* start in perf mode for better performance initially */
3867 msm_dwc3_perf_vote_update(mdwc, true);
3868 schedule_delayed_work(&mdwc->perf_vote_work,
3869 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003870 } else {
3871 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3872 __func__, dwc->gadget.name);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303873 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3874 msm_dwc3_perf_vote_update(mdwc, false);
3875 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3876
Mayank Rana511f3b22016-08-02 12:00:11 -07003877 usb_gadget_vbus_disconnect(&dwc->gadget);
3878 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3879 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3880 dwc3_override_vbus_status(mdwc, false);
3881 dwc3_usb3_phy_suspend(dwc, false);
3882 }
3883
3884 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003885 dbg_event(0xFF, "StopGdgt psync",
3886 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003887
3888 return 0;
3889}
3890
Mayank Rana54d60432017-07-18 12:10:04 -07003891/* notifier 'event' selects the new host speed: 0 - USB_SPEED_HIGH, 1 - USB_SPEED_SUPER */
3892static int dwc3_restart_usb_host_mode(struct notifier_block *nb,
3893 unsigned long event, void *ptr)
3894{
3895 struct dwc3_msm *mdwc;
3896 struct dwc3 *dwc;
3897 int ret = -EINVAL, usb_speed;
3898
3899 mdwc = container_of(nb, struct dwc3_msm, host_restart_nb);
3900 dwc = platform_get_drvdata(mdwc->dwc3);
3901
3902 usb_speed = (event == 0 ? USB_SPEED_HIGH : USB_SPEED_SUPER);
3903 if (dwc->maximum_speed == usb_speed)
3904 goto err;
3905
Mayank Rana8a5cba82017-10-27 15:12:54 -07003906 dbg_event(0xFF, "fw_restarthost", 0);
3907 flush_delayed_work(&mdwc->sm_work);
Mayank Rana54d60432017-07-18 12:10:04 -07003908 dbg_event(0xFF, "stop_host_mode", dwc->maximum_speed);
3909 ret = dwc3_otg_start_host(mdwc, 0);
3910 if (ret)
3911 goto err;
3912
3913 /*
 3914 * Stopping host mode performs an autosuspend of the mdwc
 3915 * device, and it may take some time before PM runtime suspend runs.
3916 * Hence call pm_runtime_suspend() API to invoke PM runtime
3917 * suspend immediately to put USB controller and PHYs into suspend.
3918 */
3919 ret = pm_runtime_suspend(mdwc->dev);
3920 dbg_event(0xFF, "pm_runtime_sus", ret);
3921
3922 dwc->maximum_speed = usb_speed;
3923 mdwc->otg_state = OTG_STATE_B_IDLE;
3924 schedule_delayed_work(&mdwc->sm_work, 0);
3925 dbg_event(0xFF, "complete_host_change", dwc->maximum_speed);
3926err:
3927 return ret;
3928}
3929
Hemant Kumar006fae42017-07-12 18:11:25 -07003930static int get_psy_type(struct dwc3_msm *mdwc)
Mayank Rana511f3b22016-08-02 12:00:11 -07003931{
Jack Pham8caff352016-08-19 16:33:55 -07003932 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003933
3934 if (mdwc->charging_disabled)
Hemant Kumar006fae42017-07-12 18:11:25 -07003935 return -EINVAL;
Mayank Rana511f3b22016-08-02 12:00:11 -07003936
3937 if (!mdwc->usb_psy) {
3938 mdwc->usb_psy = power_supply_get_by_name("usb");
3939 if (!mdwc->usb_psy) {
Hemant Kumar006fae42017-07-12 18:11:25 -07003940 dev_err(mdwc->dev, "Could not get usb psy\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07003941 return -ENODEV;
3942 }
3943 }
3944
Hemant Kumar006fae42017-07-12 18:11:25 -07003945 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_REAL_TYPE,
3946 &pval);
3947
3948 return pval.intval;
3949}
3950
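/*
 * Propagate the gadget's negotiated current budget to the PMIC: floating
 * chargers get a -ETIMEDOUT sentinel, standard SDPs get the limit
 * converted from mA to uA via POWER_SUPPLY_PROP_SDP_CURRENT_MAX.
 */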
3951static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
3952{
3953 union power_supply_propval pval = {0};
3954 int ret, psy_type;
3955
Hemant Kumar006fae42017-07-12 18:11:25 -07003956 psy_type = get_psy_type(mdwc);
Vijayavardhan Vennapusac7f9b0f2017-10-03 14:44:52 +05303957 if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
Hemant Kumard6bae052017-07-27 15:11:25 -07003958 pval.intval = -ETIMEDOUT;
Vijayavardhan Vennapusac7f9b0f2017-10-03 14:44:52 +05303959 goto set_prop;
Hemant Kumard6bae052017-07-27 15:11:25 -07003960 }
Jack Pham8caff352016-08-19 16:33:55 -07003961
Vijayavardhan Vennapusac7f9b0f2017-10-03 14:44:52 +05303962 if (mdwc->max_power == mA || psy_type != POWER_SUPPLY_TYPE_USB)
3963 return 0;
3964
3965 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
3966 /* Set max current limit in uA */
3967 pval.intval = 1000 * mA;
3968
3969set_prop:
Jack Phamd72bafe2016-08-09 11:07:22 -07003970 ret = power_supply_set_property(mdwc->usb_psy,
Nicholas Troast7f55c922017-07-25 13:18:03 -07003971 POWER_SUPPLY_PROP_SDP_CURRENT_MAX, &pval);
Jack Phamd72bafe2016-08-09 11:07:22 -07003972 if (ret) {
3973 dev_dbg(mdwc->dev, "power supply error when setting property\n");
3974 return ret;
3975 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003976
3977 mdwc->max_power = mA;
3978 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07003979}
3980
3981
3982/**
3983 * dwc3_otg_sm_work - workqueue function.
3984 *
 3985 * @w: Pointer to the OTG state machine work_struct
3986 *
 3987 * NOTE: After any change in otg_state, we must reschedule the state machine.
3988 */
3989static void dwc3_otg_sm_work(struct work_struct *w)
3990{
3991 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
3992 struct dwc3 *dwc = NULL;
3993 bool work = 0;
3994 int ret = 0;
3995 unsigned long delay = 0;
3996 const char *state;
3997
3998 if (mdwc->dwc3)
3999 dwc = platform_get_drvdata(mdwc->dwc3);
4000
4001 if (!dwc) {
4002 dev_err(mdwc->dev, "dwc is NULL.\n");
4003 return;
4004 }
4005
4006 state = usb_otg_state_string(mdwc->otg_state);
4007 dev_dbg(mdwc->dev, "%s state\n", state);
Mayank Rana08e41922017-03-02 15:25:48 -08004008 dbg_event(0xFF, state, 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004009
4010 /* Check OTG state */
4011 switch (mdwc->otg_state) {
4012 case OTG_STATE_UNDEFINED:
Hemant Kumar8220a982017-01-19 18:11:34 -08004013 /* put controller and phy in suspend if no cable connected */
Mayank Rana511f3b22016-08-02 12:00:11 -07004014 if (test_bit(ID, &mdwc->inputs) &&
Hemant Kumar8220a982017-01-19 18:11:34 -08004015 !test_bit(B_SESS_VLD, &mdwc->inputs)) {
4016 dbg_event(0xFF, "undef_id_!bsv", 0);
4017 pm_runtime_set_active(mdwc->dev);
4018 pm_runtime_enable(mdwc->dev);
4019 pm_runtime_get_noresume(mdwc->dev);
4020 dwc3_msm_resume(mdwc);
4021 pm_runtime_put_sync(mdwc->dev);
4022 dbg_event(0xFF, "Undef NoUSB",
4023 atomic_read(&mdwc->dev->power.usage_count));
4024 mdwc->otg_state = OTG_STATE_B_IDLE;
Mayank Rana511f3b22016-08-02 12:00:11 -07004025 break;
Hemant Kumar8220a982017-01-19 18:11:34 -08004026 }
Mayank Rana511f3b22016-08-02 12:00:11 -07004027
Mayank Rana08e41922017-03-02 15:25:48 -08004028 dbg_event(0xFF, "Exit UNDEF", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004029 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar8220a982017-01-19 18:11:34 -08004030 pm_runtime_set_suspended(mdwc->dev);
4031 pm_runtime_enable(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07004032 /* fall-through */
4033 case OTG_STATE_B_IDLE:
4034 if (!test_bit(ID, &mdwc->inputs)) {
4035 dev_dbg(mdwc->dev, "!id\n");
4036 mdwc->otg_state = OTG_STATE_A_IDLE;
4037 work = 1;
4038 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
4039 dev_dbg(mdwc->dev, "b_sess_vld\n");
Hemant Kumar006fae42017-07-12 18:11:25 -07004040 if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_FLOAT)
4041 queue_delayed_work(mdwc->dwc3_wq,
4042 &mdwc->sdp_check,
4043 msecs_to_jiffies(SDP_CONNETION_CHECK_TIME));
Mayank Rana511f3b22016-08-02 12:00:11 -07004044 /*
4045 * Increment pm usage count upon cable connect. Count
4046 * is decremented in OTG_STATE_B_PERIPHERAL state on
4047 * cable disconnect or in bus suspend.
4048 */
4049 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004050 dbg_event(0xFF, "BIDLE gsync",
4051 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004052 dwc3_otg_start_peripheral(mdwc, 1);
4053 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4054 work = 1;
4055 } else {
4056 dwc3_msm_gadget_vbus_draw(mdwc, 0);
4057 dev_dbg(mdwc->dev, "Cable disconnected\n");
4058 }
4059 break;
4060
4061 case OTG_STATE_B_PERIPHERAL:
4062 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
4063 !test_bit(ID, &mdwc->inputs)) {
4064 dev_dbg(mdwc->dev, "!id || !bsv\n");
4065 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar006fae42017-07-12 18:11:25 -07004066 cancel_delayed_work_sync(&mdwc->sdp_check);
Mayank Rana511f3b22016-08-02 12:00:11 -07004067 dwc3_otg_start_peripheral(mdwc, 0);
4068 /*
4069 * Decrement pm usage count upon cable disconnect
4070 * which was incremented upon cable connect in
4071 * OTG_STATE_B_IDLE state
4072 */
Mayank Ranace7ff8b62017-11-09 17:25:55 -08004073 pm_runtime_put_sync_suspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004074 dbg_event(0xFF, "!BSV psync",
4075 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004076 work = 1;
4077 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
4078 test_bit(B_SESS_VLD, &mdwc->inputs)) {
4079 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
4080 mdwc->otg_state = OTG_STATE_B_SUSPEND;
4081 /*
4082 * Decrement pm usage count upon bus suspend.
4083 * Count was incremented either upon cable
4084 * connect in OTG_STATE_B_IDLE or host
4085 * initiated resume after bus suspend in
4086 * OTG_STATE_B_SUSPEND state
4087 */
4088 pm_runtime_mark_last_busy(mdwc->dev);
4089 pm_runtime_put_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004090 dbg_event(0xFF, "SUSP put",
4091 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004092 }
4093 break;
4094
4095 case OTG_STATE_B_SUSPEND:
4096 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
4097 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
4098 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar006fae42017-07-12 18:11:25 -07004099 cancel_delayed_work_sync(&mdwc->sdp_check);
Mayank Rana511f3b22016-08-02 12:00:11 -07004100 dwc3_otg_start_peripheral(mdwc, 0);
4101 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
4102 dev_dbg(mdwc->dev, "BSUSP !susp\n");
4103 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4104 /*
4105 * Increment pm usage count upon host
4106 * initiated resume. Count was decremented
4107 * upon bus suspend in
4108 * OTG_STATE_B_PERIPHERAL state.
4109 */
4110 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004111 dbg_event(0xFF, "!SUSP gsync",
4112 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004113 }
4114 break;
4115
4116 case OTG_STATE_A_IDLE:
 4117 /* Switch to A-Device */
4118 if (test_bit(ID, &mdwc->inputs)) {
4119 dev_dbg(mdwc->dev, "id\n");
4120 mdwc->otg_state = OTG_STATE_B_IDLE;
4121 mdwc->vbus_retry_count = 0;
4122 work = 1;
4123 } else {
4124 mdwc->otg_state = OTG_STATE_A_HOST;
4125 ret = dwc3_otg_start_host(mdwc, 1);
4126 if ((ret == -EPROBE_DEFER) &&
4127 mdwc->vbus_retry_count < 3) {
4128 /*
 4129 * Getting the regulator failed because the regulator driver
 4130 * is not up yet. Retry starting the host after 1 sec.
4131 */
4132 mdwc->otg_state = OTG_STATE_A_IDLE;
4133 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
4134 delay = VBUS_REG_CHECK_DELAY;
4135 work = 1;
4136 mdwc->vbus_retry_count++;
4137 } else if (ret) {
4138 dev_err(mdwc->dev, "unable to start host\n");
4139 mdwc->otg_state = OTG_STATE_A_IDLE;
4140 goto ret;
4141 }
4142 }
4143 break;
4144
4145 case OTG_STATE_A_HOST:
Manu Gautam976fdfc2016-08-18 09:27:35 +05304146 if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
4147 dev_dbg(mdwc->dev, "id || hc_died\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07004148 dwc3_otg_start_host(mdwc, 0);
4149 mdwc->otg_state = OTG_STATE_B_IDLE;
4150 mdwc->vbus_retry_count = 0;
Manu Gautam976fdfc2016-08-18 09:27:35 +05304151 mdwc->hc_died = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07004152 work = 1;
4153 } else {
4154 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004155 dbg_event(0xFF, "XHCIResume", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004156 if (dwc)
4157 pm_runtime_resume(&dwc->xhci->dev);
4158 }
4159 break;
4160
4161 default:
4162 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
4163
4164 }
4165
4166 if (work)
4167 schedule_delayed_work(&mdwc->sm_work, delay);
4168
4169ret:
4170 return;
4171}
4172
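/*
 * System sleep callbacks: suspend is refused with -EBUSY unless the
 * controller already entered LPM through runtime PM; resume only clears
 * pm_suspended and queues resume_work, leaving the actual wake-up to the
 * OTG state machine.
 */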
4173#ifdef CONFIG_PM_SLEEP
4174static int dwc3_msm_pm_suspend(struct device *dev)
4175{
4176 int ret = 0;
4177 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4178 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4179
4180 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004181 dbg_event(0xFF, "PM Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004182
4183 flush_workqueue(mdwc->dwc3_wq);
4184 if (!atomic_read(&dwc->in_lpm)) {
4185 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
4186 return -EBUSY;
4187 }
4188
4189 ret = dwc3_msm_suspend(mdwc);
4190 if (!ret)
4191 atomic_set(&mdwc->pm_suspended, 1);
4192
4193 return ret;
4194}
4195
4196static int dwc3_msm_pm_resume(struct device *dev)
4197{
4198 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004199 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004200
4201 dev_dbg(dev, "dwc3-msm PM resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004202 dbg_event(0xFF, "PM Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004203
Mayank Rana511f3b22016-08-02 12:00:11 -07004204 /* flush to avoid race in read/write of pm_suspended */
4205 flush_workqueue(mdwc->dwc3_wq);
4206 atomic_set(&mdwc->pm_suspended, 0);
4207
4208 /* kick in otg state machine */
4209 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
4210
4211 return 0;
4212}
4213#endif
4214
4215#ifdef CONFIG_PM
4216static int dwc3_msm_runtime_idle(struct device *dev)
4217{
Mayank Rana08e41922017-03-02 15:25:48 -08004218 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4219 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4220
Mayank Rana511f3b22016-08-02 12:00:11 -07004221 dev_dbg(dev, "DWC3-msm runtime idle\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004222 dbg_event(0xFF, "RT Idle", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004223
4224 return 0;
4225}
4226
4227static int dwc3_msm_runtime_suspend(struct device *dev)
4228{
4229 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004230 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004231
4232 dev_dbg(dev, "DWC3-msm runtime suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004233 dbg_event(0xFF, "RT Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004234
4235 return dwc3_msm_suspend(mdwc);
4236}
4237
4238static int dwc3_msm_runtime_resume(struct device *dev)
4239{
4240 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004241 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004242
4243 dev_dbg(dev, "DWC3-msm runtime resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004244 dbg_event(0xFF, "RT Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004245
4246 return dwc3_msm_resume(mdwc);
4247}
4248#endif
4249
4250static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
4251 SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
4252 SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
4253 dwc3_msm_runtime_idle)
4254};
4255
 4256static const struct of_device_id of_dwc3_match[] = {
4257 {
4258 .compatible = "qcom,dwc-usb3-msm",
4259 },
4260 { },
4261};
 4262MODULE_DEVICE_TABLE(of, of_dwc3_match);
4263
4264static struct platform_driver dwc3_msm_driver = {
4265 .probe = dwc3_msm_probe,
4266 .remove = dwc3_msm_remove,
4267 .driver = {
4268 .name = "msm-dwc3",
4269 .pm = &dwc3_msm_dev_pm_ops,
 4270 .of_match_table = of_dwc3_match,
4271 },
4272};
4273
4274MODULE_LICENSE("GPL v2");
4275MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
4276
4277static int dwc3_msm_init(void)
4278{
4279 return platform_driver_register(&dwc3_msm_driver);
4280}
4281module_init(dwc3_msm_init);
4282
4283static void __exit dwc3_msm_exit(void)
4284{
4285 platform_driver_unregister(&dwc3_msm_driver);
4286}
4287module_exit(dwc3_msm_exit);