blob: dadd61ee237b4115c410445ec26dd7c817bc1b87
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08001/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
Mayank Rana511f3b22016-08-02 12:00:11 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/cpu.h>
18#include <linux/platform_device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dmapool.h>
21#include <linux/pm_runtime.h>
22#include <linux/ratelimit.h>
23#include <linux/interrupt.h>
Jack Phambbe27962017-03-23 18:42:26 -070024#include <asm/dma-iommu.h>
25#include <linux/iommu.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070026#include <linux/ioport.h>
27#include <linux/clk.h>
28#include <linux/io.h>
29#include <linux/module.h>
30#include <linux/types.h>
31#include <linux/delay.h>
32#include <linux/of.h>
33#include <linux/of_platform.h>
34#include <linux/of_gpio.h>
35#include <linux/list.h>
36#include <linux/uaccess.h>
37#include <linux/usb/ch9.h>
38#include <linux/usb/gadget.h>
39#include <linux/usb/of.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070040#include <linux/regulator/consumer.h>
41#include <linux/pm_wakeup.h>
42#include <linux/power_supply.h>
43#include <linux/cdev.h>
44#include <linux/completion.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070045#include <linux/msm-bus.h>
46#include <linux/irq.h>
47#include <linux/extcon.h>
Amit Nischal4d278212016-06-06 17:54:34 +053048#include <linux/reset.h>
Hemant Kumar633dc332016-08-10 13:41:05 -070049#include <linux/clk/qcom.h>
Mayank Rana511f3b22016-08-02 12:00:11 -070050
51#include "power.h"
52#include "core.h"
53#include "gadget.h"
54#include "dbm.h"
55#include "debug.h"
56#include "xhci.h"
57
Hemant Kumar006fae42017-07-12 18:11:25 -070058#define SDP_CONNETION_CHECK_TIME 10000 /* in ms */
59
Mayank Rana511f3b22016-08-02 12:00:11 -070060/* timeout to wait for USB cable status notification (in ms) */
61#define SM_INIT_TIMEOUT 30000
62
63/* AHB2PHY register offsets */
64#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
65
66/* AHB2PHY read/write wait value */
67#define ONE_READ_WRITE_WAIT 0x11
68
69/* cpu to which the usb interrupt is affined */
70static int cpu_to_affin;
71module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
72MODULE_PARM_DESC(cpu_to_affin, "affine usb irq to this cpu");
73
Mayank Ranaf70d8212017-06-12 14:02:07 -070074/* override for USB speed */
75static int override_usb_speed;
76module_param(override_usb_speed, int, 0644);
77MODULE_PARM_DESC(override_usb_speed, "override for USB speed");
78
Mayank Rana511f3b22016-08-02 12:00:11 -070079/* XHCI registers */
80#define USB3_HCSPARAMS1 (0x4)
81#define USB3_PORTSC (0x420)
82
83/**
84 * USB QSCRATCH Hardware registers
85 *
86 */
87#define QSCRATCH_REG_OFFSET (0x000F8800)
88#define QSCRATCH_GENERAL_CFG (QSCRATCH_REG_OFFSET + 0x08)
89#define CGCTL_REG (QSCRATCH_REG_OFFSET + 0x28)
90#define PWR_EVNT_IRQ_STAT_REG (QSCRATCH_REG_OFFSET + 0x58)
91#define PWR_EVNT_IRQ_MASK_REG (QSCRATCH_REG_OFFSET + 0x5C)
92
93#define PWR_EVNT_POWERDOWN_IN_P3_MASK BIT(2)
94#define PWR_EVNT_POWERDOWN_OUT_P3_MASK BIT(3)
95#define PWR_EVNT_LPM_IN_L2_MASK BIT(4)
96#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5)
97#define PWR_EVNT_LPM_OUT_L1_MASK BIT(13)
98
99/* QSCRATCH_GENERAL_CFG register bit offset */
100#define PIPE_UTMI_CLK_SEL BIT(0)
101#define PIPE3_PHYSTATUS_SW BIT(3)
102#define PIPE_UTMI_CLK_DIS BIT(8)
103
104#define HS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x10)
105#define UTMI_OTG_VBUS_VALID BIT(20)
106#define SW_SESSVLD_SEL BIT(28)
107
108#define SS_PHY_CTRL_REG (QSCRATCH_REG_OFFSET + 0x30)
109#define LANE0_PWR_PRESENT BIT(24)
110
111/* GSI related registers */
112#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
113#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
114
115#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
116#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
117#define GSI_CLK_EN_MASK BIT(12)
118#define BLOCK_GSI_WR_GO_MASK BIT(1)
119#define GSI_EN_MASK BIT(0)
120
121#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
122#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
123#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
124#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
125
126#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
127#define GSI_WR_CTRL_STATE_MASK BIT(15)
128
Mayank Ranaf4918d32016-12-15 13:35:55 -0800129#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
130#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(n) (n << 22)
131#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
132#define DWC3_GEVENT_TYPE_GSI 0x3
133
Mayank Rana511f3b22016-08-02 12:00:11 -0700134struct dwc3_msm_req_complete {
135 struct list_head list_item;
136 struct usb_request *req;
137 void (*orig_complete)(struct usb_ep *ep,
138 struct usb_request *req);
139};
140
141enum dwc3_id_state {
142 DWC3_ID_GROUND = 0,
143 DWC3_ID_FLOAT,
144};
145
146/* for type c cable */
147enum plug_orientation {
148 ORIENTATION_NONE,
149 ORIENTATION_CC1,
150 ORIENTATION_CC2,
151};
152
Mayank Ranad339abe2017-05-31 09:19:49 -0700153enum msm_usb_irq {
154 HS_PHY_IRQ,
155 PWR_EVNT_IRQ,
156 DP_HS_PHY_IRQ,
157 DM_HS_PHY_IRQ,
158 SS_PHY_IRQ,
159 USB_MAX_IRQ
160};
161
162struct usb_irq {
163 char *name;
164 int irq;
165 bool enable;
166};
167
168static const struct usb_irq usb_irq_info[USB_MAX_IRQ] = {
169 {"hs_phy_irq", 0},
170 {"pwr_event_irq", 0},
171 {"dp_hs_phy_irq", 0},
172 {"dm_hs_phy_irq", 0},
173 {"ss_phy_irq", 0},
174};
175
Mayank Rana511f3b22016-08-02 12:00:11 -0700176/* Input bits to state machine (mdwc->inputs) */
177
178#define ID 0
179#define B_SESS_VLD 1
180#define B_SUSPEND 2
181
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530182#define PM_QOS_SAMPLE_SEC 2
183#define PM_QOS_THRESHOLD 400
184
Mayank Rana511f3b22016-08-02 12:00:11 -0700185struct dwc3_msm {
186 struct device *dev;
187 void __iomem *base;
188 void __iomem *ahb2phy_base;
189 struct platform_device *dwc3;
Jack Phambbe27962017-03-23 18:42:26 -0700190 struct dma_iommu_mapping *iommu_map;
Mayank Rana511f3b22016-08-02 12:00:11 -0700191 const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
192 struct list_head req_complete_list;
193 struct clk *xo_clk;
194 struct clk *core_clk;
195 long core_clk_rate;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800196 long core_clk_rate_hs;
Mayank Rana511f3b22016-08-02 12:00:11 -0700197 struct clk *iface_clk;
198 struct clk *sleep_clk;
199 struct clk *utmi_clk;
200 unsigned int utmi_clk_rate;
201 struct clk *utmi_clk_src;
202 struct clk *bus_aggr_clk;
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +0530203 struct clk *noc_aggr_clk;
Mayank Rana511f3b22016-08-02 12:00:11 -0700204 struct clk *cfg_ahb_clk;
Amit Nischal4d278212016-06-06 17:54:34 +0530205 struct reset_control *core_reset;
Mayank Rana511f3b22016-08-02 12:00:11 -0700206 struct regulator *dwc3_gdsc;
207
208 struct usb_phy *hs_phy, *ss_phy;
209
210 struct dbm *dbm;
211
212 /* VBUS regulator for host mode */
213 struct regulator *vbus_reg;
214 int vbus_retry_count;
215 bool resume_pending;
216 atomic_t pm_suspended;
Mayank Ranad339abe2017-05-31 09:19:49 -0700217 struct usb_irq wakeup_irq[USB_MAX_IRQ];
Mayank Rana511f3b22016-08-02 12:00:11 -0700218 struct work_struct resume_work;
219 struct work_struct restart_usb_work;
220 bool in_restart;
221 struct workqueue_struct *dwc3_wq;
222 struct delayed_work sm_work;
223 unsigned long inputs;
224 unsigned int max_power;
225 bool charging_disabled;
226 enum usb_otg_state otg_state;
Mayank Rana511f3b22016-08-02 12:00:11 -0700227 u32 bus_perf_client;
228 struct msm_bus_scale_pdata *bus_scale_table;
229 struct power_supply *usb_psy;
Jack Pham4b8b4ae2016-08-09 11:36:34 -0700230 struct work_struct vbus_draw_work;
Mayank Rana511f3b22016-08-02 12:00:11 -0700231 bool in_host_mode;
Mayank Rana7e781e72017-12-13 17:27:23 -0800232 bool in_device_mode;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800233 enum usb_device_speed max_rh_port_speed;
Mayank Rana511f3b22016-08-02 12:00:11 -0700234 unsigned int tx_fifo_size;
235 bool vbus_active;
236 bool suspend;
237 bool disable_host_mode_pm;
Mayank Ranad339abe2017-05-31 09:19:49 -0700238 bool use_pdc_interrupts;
Mayank Rana511f3b22016-08-02 12:00:11 -0700239 enum dwc3_id_state id_state;
240 unsigned long lpm_flags;
241#define MDWC3_SS_PHY_SUSPEND BIT(0)
242#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY BIT(1)
243#define MDWC3_POWER_COLLAPSE BIT(2)
244
245 unsigned int irq_to_affin;
246 struct notifier_block dwc3_cpu_notifier;
Manu Gautam976fdfc2016-08-18 09:27:35 +0530247 struct notifier_block usbdev_nb;
248 bool hc_died;
Pratham Pratapd76a1782017-11-14 20:50:31 +0530249 /* for usb connector either type-C or microAB */
250 bool type_c;
251 /* whether to vote for VBUS reg in host mode */
252 bool no_vbus_vote_type_c;
Mayank Rana511f3b22016-08-02 12:00:11 -0700253
254 struct extcon_dev *extcon_vbus;
255 struct extcon_dev *extcon_id;
Mayank Rana51958172017-02-28 14:49:21 -0800256 struct extcon_dev *extcon_eud;
Mayank Rana511f3b22016-08-02 12:00:11 -0700257 struct notifier_block vbus_nb;
258 struct notifier_block id_nb;
Mayank Rana51958172017-02-28 14:49:21 -0800259 struct notifier_block eud_event_nb;
Mayank Rana54d60432017-07-18 12:10:04 -0700260 struct notifier_block host_restart_nb;
Mayank Rana511f3b22016-08-02 12:00:11 -0700261
Jack Pham4d4e9342016-12-07 19:25:02 -0800262 struct notifier_block host_nb;
263
Mayank Rana511f3b22016-08-02 12:00:11 -0700264 atomic_t in_p3;
265 unsigned int lpm_to_suspend_delay;
266 bool init;
267 enum plug_orientation typec_orientation;
Mayank Ranaf4918d32016-12-15 13:35:55 -0800268 u32 num_gsi_event_buffers;
269 struct dwc3_event_buffer **gsi_ev_buff;
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +0530270 int pm_qos_latency;
271 struct pm_qos_request pm_qos_req_dma;
272 struct delayed_work perf_vote_work;
Hemant Kumar006fae42017-07-12 18:11:25 -0700273 struct delayed_work sdp_check;
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +0530274 struct mutex suspend_resume_mutex;
Mayank Rana511f3b22016-08-02 12:00:11 -0700275};
276
277#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
278#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
279#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
280
281#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */
282#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */
283#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */
284
285#define USB_SSPHY_1P8_VOL_MIN 1800000 /* uV */
286#define USB_SSPHY_1P8_VOL_MAX 1800000 /* uV */
287#define USB_SSPHY_1P8_HPM_LOAD 23000 /* uA */
288
289#define DSTS_CONNECTSPD_SS 0x4
290
291
292static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
293static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
Mayank Ranaf4918d32016-12-15 13:35:55 -0800294static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
Mayank Rana54d60432017-07-18 12:10:04 -0700295static int dwc3_restart_usb_host_mode(struct notifier_block *nb,
296 unsigned long event, void *ptr);
Mayank Ranaf70d8212017-06-12 14:02:07 -0700297
298static inline bool is_valid_usb_speed(struct dwc3 *dwc, int speed)
299{
300
301 return (((speed == USB_SPEED_FULL) || (speed == USB_SPEED_HIGH) ||
302 (speed == USB_SPEED_SUPER) || (speed == USB_SPEED_SUPER_PLUS))
303 && (speed <= dwc->maximum_speed));
304}
305
Mayank Rana511f3b22016-08-02 12:00:11 -0700306/**
307 *
308 * Read register with debug info.
309 *
310 * @base - DWC3 base virtual address.
311 * @offset - register offset.
312 *
313 * @return u32
314 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700315static inline u32 dwc3_msm_read_reg(void __iomem *base, u32 offset)
Mayank Rana511f3b22016-08-02 12:00:11 -0700316{
317 u32 val = ioread32(base + offset);
318 return val;
319}
320
321/**
322 * Read register masked field with debug info.
323 *
324 * @base - DWC3 base virtual address.
325 * @offset - register offset.
326 * @mask - register bitmask.
327 *
328 * @return u32
329 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700330static inline u32 dwc3_msm_read_reg_field(void __iomem *base,
Mayank Rana511f3b22016-08-02 12:00:11 -0700331 u32 offset,
332 const u32 mask)
333{
Mayank Ranad796cab2017-07-11 15:34:12 -0700334 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700335 u32 val = ioread32(base + offset);
336
337 val &= mask; /* clear other bits */
338 val >>= shift;
339 return val;
340}
341
342/**
343 *
344 * Write register with debug info.
345 *
346 * @base - DWC3 base virtual address.
347 * @offset - register offset.
348 * @val - value to write.
349 *
350 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700351static inline void dwc3_msm_write_reg(void __iomem *base, u32 offset, u32 val)
Mayank Rana511f3b22016-08-02 12:00:11 -0700352{
353 iowrite32(val, base + offset);
354}
355
356/**
357 * Write register masked field with debug info.
358 *
359 * @base - DWC3 base virtual address.
360 * @offset - register offset.
361 * @mask - register bitmask.
362 * @val - value to write.
363 *
364 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700365static inline void dwc3_msm_write_reg_field(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700366 const u32 mask, u32 val)
367{
Mayank Ranad796cab2017-07-11 15:34:12 -0700368 u32 shift = __ffs(mask);
Mayank Rana511f3b22016-08-02 12:00:11 -0700369 u32 tmp = ioread32(base + offset);
370
371 tmp &= ~mask; /* clear written bits */
372 val = tmp | (val << shift);
373 iowrite32(val, base + offset);
374}
375
376/**
377 * Write register and read back masked value to confirm it is written
378 *
379 * @base - DWC3 base virtual address.
380 * @offset - register offset.
381 * @mask - register bitmask specifying what should be updated
382 * @val - value to write.
383 *
384 */
Stephen Boyda247bae2017-06-15 14:09:08 -0700385static inline void dwc3_msm_write_readback(void __iomem *base, u32 offset,
Mayank Rana511f3b22016-08-02 12:00:11 -0700386 const u32 mask, u32 val)
387{
388 u32 write_val, tmp = ioread32(base + offset);
389
390 tmp &= ~mask; /* retain other bits */
391 write_val = tmp | val;
392
393 iowrite32(write_val, base + offset);
394
395 /* Read back to see if val was written */
396 tmp = ioread32(base + offset);
397 tmp &= mask; /* clear other bits */
398
399 if (tmp != val)
400 pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
401 __func__, val, offset);
402}
403
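#if 0
/*
 * Illustrative sketch only (not part of the original driver, never built):
 * how the masked-field helpers above compose. dwc3_msm_write_reg_field()
 * shifts 'val' by __ffs(mask) internally, so callers pass the raw field
 * value rather than a pre-shifted bit pattern. The register and mask names
 * are the QSCRATCH defines from earlier in this file; the function name is
 * hypothetical.
 */
static void dwc3_msm_example_field_write(struct dwc3_msm *mdwc)
{
	/* Field value 1 lands on BIT(0) after the helper shifts it */
	dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
				 PIPE_UTMI_CLK_SEL, 1);

	/* Read it back as a plain 0/1 field value */
	if (dwc3_msm_read_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
				    PIPE_UTMI_CLK_SEL))
		pr_debug("PIPE_UTMI_CLK_SEL is set\n");
}
#endif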
Hemant Kumar8e4c2f22017-01-24 18:13:07 -0800404static bool dwc3_msm_is_ss_rhport_connected(struct dwc3_msm *mdwc)
405{
406 int i, num_ports;
407 u32 reg;
408
409 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
410 num_ports = HCS_MAX_PORTS(reg);
411
412 for (i = 0; i < num_ports; i++) {
413 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
414 if ((reg & PORT_CONNECT) && DEV_SUPERSPEED(reg))
415 return true;
416 }
417
418 return false;
419}
420
Mayank Rana511f3b22016-08-02 12:00:11 -0700421static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
422{
423 int i, num_ports;
424 u32 reg;
425
426 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
427 num_ports = HCS_MAX_PORTS(reg);
428
429 for (i = 0; i < num_ports; i++) {
430 reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
431 if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
432 return true;
433 }
434
435 return false;
436}
437
438static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
439{
440 u8 speed;
441
442 speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
443 return !!(speed & DSTS_CONNECTSPD_SS);
444}
445
446static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
447{
448 if (mdwc->in_host_mode)
449 return dwc3_msm_is_host_superspeed(mdwc);
450
451 return dwc3_msm_is_dev_superspeed(mdwc);
452}
453
454#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
455/**
456 * Configure the DBM with the BAM's data fifo.
457 * This function is called by the USB BAM Driver
458 * upon initialization.
459 *
460 * @ep - pointer to usb endpoint.
461 * @addr - address of data fifo.
462 * @size - size of data fifo.
463 *
464 */
Mayank Rana52594a82017-11-06 16:58:04 -0800465int msm_data_fifo_config(struct usb_ep *ep, unsigned long addr,
Mayank Rana511f3b22016-08-02 12:00:11 -0700466 u32 size, u8 dst_pipe_idx)
467{
468 struct dwc3_ep *dep = to_dwc3_ep(ep);
469 struct dwc3 *dwc = dep->dwc;
470 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
471
472 dev_dbg(mdwc->dev, "%s\n", __func__);
473
474 return dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
475 dst_pipe_idx);
476}
477
478
479/**
480* Cleanups for msm endpoint on request complete.
481*
482* Also call original request complete.
483*
484* @usb_ep - pointer to usb_ep instance.
485* @request - pointer to usb_request instance.
486*
487* @return int - 0 on success, negative on error.
488*/
489static void dwc3_msm_req_complete_func(struct usb_ep *ep,
490 struct usb_request *request)
491{
492 struct dwc3_ep *dep = to_dwc3_ep(ep);
493 struct dwc3 *dwc = dep->dwc;
494 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
495 struct dwc3_msm_req_complete *req_complete = NULL;
496
497 /* Find original request complete function and remove it from list */
498 list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
499 if (req_complete->req == request)
500 break;
501 }
502 if (!req_complete || req_complete->req != request) {
503 dev_err(dep->dwc->dev, "%s: could not find the request\n",
504 __func__);
505 return;
506 }
507 list_del(&req_complete->list_item);
508
509 /*
510 * Release one more TRB to the pool since the DBM queue took 2 TRBs
511 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
512 * released only one.
513 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700514 dep->trb_dequeue++;
Mayank Rana511f3b22016-08-02 12:00:11 -0700515
516 /* Unconfigure dbm ep */
517 dbm_ep_unconfig(mdwc->dbm, dep->number);
518
519 /*
520 * If this is the last endpoint we unconfigured, then also reset
521 * the event buffers; unless unconfiguring the ep due to lpm,
522 * in which case the event buffer only gets reset during the
523 * block reset.
524 */
525 if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
526 !dbm_reset_ep_after_lpm(mdwc->dbm))
527 dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
528
529 /*
530 * Call original complete function, notice that dwc->lock is already
531 * taken by the caller of this function (dwc3_gadget_giveback()).
532 */
533 request->complete = req_complete->orig_complete;
534 if (request->complete)
535 request->complete(ep, request);
536
537 kfree(req_complete);
538}
539
540
541/**
542* Helper function
543*
544* Reset DBM endpoint.
545*
546* @mdwc - pointer to dwc3_msm instance.
547* @dep - pointer to dwc3_ep instance.
548*
549* @return int - 0 on success, negative on error.
550*/
551static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
552{
553 int ret;
554
555 dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
556
557 /* Reset the dbm endpoint */
558 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
559 if (ret) {
560 dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
561 __func__);
562 return ret;
563 }
564
565 /*
566 * The necessary delay between asserting and deasserting the dbm ep
567 * reset is based on the number of active endpoints. If there is more
568 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
569 * delay will suffice.
570 */
571 if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
572 usleep_range(1000, 1200);
573 else
574 udelay(10);
575 ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
576 if (ret) {
577 dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
578 __func__);
579 return ret;
580 }
581
582 return 0;
583}
584
585/**
586* Reset the DBM endpoint which is linked to the given USB endpoint.
587*
588* @usb_ep - pointer to usb_ep instance.
589*
590* @return int - 0 on success, negative on error.
591*/
592
593int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
594{
595 struct dwc3_ep *dep = to_dwc3_ep(ep);
596 struct dwc3 *dwc = dep->dwc;
597 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
598
599 return __dwc3_msm_dbm_ep_reset(mdwc, dep);
600}
601EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
602
603
604/**
605* Helper function.
606* See the header of the dwc3_msm_ep_queue function.
607*
608* @dwc3_ep - pointer to dwc3_ep instance.
609* @req - pointer to dwc3_request instance.
610*
611* @return int - 0 on success, negative on error.
612*/
613static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
614{
615 struct dwc3_trb *trb;
616 struct dwc3_trb *trb_link;
617 struct dwc3_gadget_ep_cmd_params params;
618 u32 cmd;
619 int ret = 0;
620
Mayank Rana83ad5822016-08-09 14:17:22 -0700621 /* We push the request to the dep->started_list to indicate that
Mayank Rana511f3b22016-08-02 12:00:11 -0700622 * this request is issued with a start transfer. The request leaves
623 * this list in 2 cases. The first is when the transfer is
624 * completed (unless the transfer is endless, using circular TRBs
625 * with a link TRB). The second case is a stop transfer, which
626 * can be initiated by the function driver when calling dequeue.
627 */
Mayank Rana83ad5822016-08-09 14:17:22 -0700628 req->started = true;
629 list_add_tail(&req->list, &dep->started_list);
Mayank Rana511f3b22016-08-02 12:00:11 -0700630
631 /* First, prepare a normal TRB, point to the fake buffer */
Mayank Rana9ca186c2017-06-19 17:57:21 -0700632 trb = &dep->trb_pool[dep->trb_enqueue];
633 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700634 memset(trb, 0, sizeof(*trb));
635
636 req->trb = trb;
637 trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
638 trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
639 trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
640 DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
641 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
642
643 /* Second, prepare a Link TRB that points to the first TRB*/
Mayank Rana9ca186c2017-06-19 17:57:21 -0700644 trb_link = &dep->trb_pool[dep->trb_enqueue];
645 dwc3_ep_inc_enq(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700646 memset(trb_link, 0, sizeof(*trb_link));
647
648 trb_link->bpl = lower_32_bits(req->trb_dma);
649 trb_link->bph = DBM_TRB_BIT |
650 DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
651 trb_link->size = 0;
652 trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
653
654 /*
655 * Now start the transfer
656 */
657 memset(&params, 0, sizeof(params));
658 params.param0 = 0; /* TDAddr High */
659 params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
660
661 /* DBM requires IOC to be set */
662 cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
Mayank Rana83ad5822016-08-09 14:17:22 -0700663 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700664 if (ret < 0) {
665 dev_dbg(dep->dwc->dev,
666 "%s: failed to send STARTTRANSFER command\n",
667 __func__);
668
669 list_del(&req->list);
670 return ret;
671 }
672 dep->flags |= DWC3_EP_BUSY;
Mayank Rana83ad5822016-08-09 14:17:22 -0700673 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700674
675 return ret;
676}
677
678/**
679* Queue a usb request to the DBM endpoint.
680* This function should be called after the endpoint
681* was enabled by the ep_enable.
682*
683* This function prepares a special structure of TRBs which
684* is familiar to the DBM HW, so it is possible to use
685* this endpoint in DBM mode.
686*
687* The TRBs prepared by this function are one normal TRB
688* which points to a fake buffer, followed by a link TRB
689* that points to the first TRB.
690*
691* The API of this function follows the regular API of
692* usb_ep_queue (see usb_ep_ops in include/linux/usb/gadget.h).
693*
694* @usb_ep - pointer to usb_ep instance.
695* @request - pointer to usb_request instance.
696* @gfp_flags - possible flags.
697*
698* @return int - 0 on success, negative on error.
699*/
700static int dwc3_msm_ep_queue(struct usb_ep *ep,
701 struct usb_request *request, gfp_t gfp_flags)
702{
703 struct dwc3_request *req = to_dwc3_request(request);
704 struct dwc3_ep *dep = to_dwc3_ep(ep);
705 struct dwc3 *dwc = dep->dwc;
706 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
707 struct dwc3_msm_req_complete *req_complete;
708 unsigned long flags;
709 int ret = 0, size;
710 u8 bam_pipe;
711 bool producer;
712 bool disable_wb;
713 bool internal_mem;
714 bool ioc;
715 bool superspeed;
716
717 if (!(request->udc_priv & MSM_SPS_MODE)) {
718 /* Not SPS mode, call original queue */
719 dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
720 __func__);
721
722 return (mdwc->original_ep_ops[dep->number])->queue(ep,
723 request,
724 gfp_flags);
725 }
726
727 /* HW restriction regarding TRB size (8KB) */
728 if (req->request.length < 0x2000) {
729 dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
730 return -EINVAL;
731 }
732
733 /*
734 * Override req->complete function, but before doing that,
735 * store its original pointer in the req_complete_list.
736 */
737 req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
738 if (!req_complete)
739 return -ENOMEM;
740
741 req_complete->req = request;
742 req_complete->orig_complete = request->complete;
743 list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
744 request->complete = dwc3_msm_req_complete_func;
745
746 /*
747 * Configure the DBM endpoint
748 */
749 bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
750 producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
751 disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
752 internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
753 ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
754
755 ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
756 disable_wb, internal_mem, ioc);
757 if (ret < 0) {
758 dev_err(mdwc->dev,
759 "error %d after calling dbm_ep_config\n", ret);
760 return ret;
761 }
762
763 dev_vdbg(dwc->dev, "%s: queuing request %p to ep %s length %d\n",
764 __func__, request, ep->name, request->length);
765 size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
766 dbm_event_buffer_config(mdwc->dbm,
767 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
768 dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
769 DWC3_GEVNTSIZ_SIZE(size));
770
771 /*
772 * We must obtain the lock of the dwc3 core driver,
773 * including disabling interrupts, so we will be sure
774 * that we are the only ones that configure the HW device
775 * core and ensure that we queuing the request will finish
776 * as soon as possible so we will release back the lock.
777 */
778 spin_lock_irqsave(&dwc->lock, flags);
779 if (!dep->endpoint.desc) {
780 dev_err(mdwc->dev,
781 "%s: trying to queue request %p to disabled ep %s\n",
782 __func__, request, ep->name);
783 ret = -EPERM;
784 goto err;
785 }
786
787 if (dep->number == 0 || dep->number == 1) {
788 dev_err(mdwc->dev,
789 "%s: trying to queue dbm request %p to control ep %s\n",
790 __func__, request, ep->name);
791 ret = -EPERM;
792 goto err;
793 }
794
795
Mayank Rana83ad5822016-08-09 14:17:22 -0700796 if (dep->trb_dequeue != dep->trb_enqueue ||
797 !list_empty(&dep->pending_list)
798 || !list_empty(&dep->started_list)) {
Mayank Rana511f3b22016-08-02 12:00:11 -0700799 dev_err(mdwc->dev,
800 "%s: trying to queue dbm request %p to ep %s\n",
801 __func__, request, ep->name);
802 ret = -EPERM;
803 goto err;
804 } else {
Mayank Rana83ad5822016-08-09 14:17:22 -0700805 dep->trb_dequeue = 0;
806 dep->trb_enqueue = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -0700807 }
808
809 ret = __dwc3_msm_ep_queue(dep, req);
810 if (ret < 0) {
811 dev_err(mdwc->dev,
812 "error %d after calling __dwc3_msm_ep_queue\n", ret);
813 goto err;
814 }
815
816 spin_unlock_irqrestore(&dwc->lock, flags);
817 superspeed = dwc3_msm_is_dev_superspeed(mdwc);
818 dbm_set_speed(mdwc->dbm, (u8)superspeed);
819
820 return 0;
821
822err:
823 spin_unlock_irqrestore(&dwc->lock, flags);
824 kfree(req_complete);
825 return ret;
826}
827
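#if 0
/*
 * Illustrative caller sketch (not part of the original driver, never built):
 * how a function driver is expected to hand a request to a DBM-backed
 * endpoint once msm_ep_config() has swapped in dwc3_msm_ep_queue() above.
 * The request must be at least 8KB long and must carry MSM_SPS_MODE (plus
 * the BAM pipe id) in udc_priv so the override takes the DBM path instead
 * of the regular queue. 'ep', 'req' and 'bam_pipe' are hypothetical objects
 * owned by the function driver.
 */
static int example_queue_dbm_request(struct usb_ep *ep,
				     struct usb_request *req, u8 bam_pipe)
{
	req->udc_priv = MSM_SPS_MODE | (bam_pipe & MSM_PIPE_ID_MASK);
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}
#endif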
828/*
829* Returns XferRscIndex for the EP. This is stored at StartXfer GSI EP OP
830*
831* @usb_ep - pointer to usb_ep instance.
832*
833* @return int - XferRscIndex
834*/
835static inline int gsi_get_xfer_index(struct usb_ep *ep)
836{
837 struct dwc3_ep *dep = to_dwc3_ep(ep);
838
839 return dep->resource_index;
840}
841
842/*
843* Fills up the GSI channel information needed in call to IPA driver
844* for GSI channel creation.
845*
846* @usb_ep - pointer to usb_ep instance.
847* @ch_info - output parameter with requested channel info
848*/
849static void gsi_get_channel_info(struct usb_ep *ep,
850 struct gsi_channel_info *ch_info)
851{
852 struct dwc3_ep *dep = to_dwc3_ep(ep);
853 int last_trb_index = 0;
854 struct dwc3 *dwc = dep->dwc;
855 struct usb_gsi_request *request = ch_info->ch_req;
856
857 /* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
858 ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
Mayank Ranaac776d12017-04-18 16:56:13 -0700859 DWC3_DEP_BASE(dep->number) + DWC3_DEPCMD);
860
Mayank Rana511f3b22016-08-02 12:00:11 -0700861 ch_info->depcmd_hi_addr = 0;
862
863 ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
864 &dep->trb_pool[0]);
865 /* Convert to multiples of 1KB */
866 ch_info->const_buffer_size = request->buf_len/1024;
867
868 /* IN direction */
869 if (dep->direction) {
870 /*
871 * Multiply by size of each TRB for xfer_ring_len in bytes.
872 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
873 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
874 */
875 ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
876 last_trb_index = 2 * request->num_bufs + 2;
877 } else { /* OUT direction */
878 /*
879 * Multiply by size of each TRB for xfer_ring_len in bytes.
880 * n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 2
881 * LINK TRBs (one at the start and one at the end of the ring).
882 */
Mayank Rana64d136b2016-11-01 21:01:34 -0700883 ch_info->xfer_ring_len = (request->num_bufs + 2) * 0x10;
884 last_trb_index = request->num_bufs + 2;
Mayank Rana511f3b22016-08-02 12:00:11 -0700885 }
886
887 /* Store last 16 bits of LINK TRB address as per GSI hw requirement */
888 ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
889 &dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
890 ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
891 DWC3_GEVNTCOUNT(ep->ep_intr_num));
892 ch_info->gevntcount_hi_addr = 0;
893
894 dev_dbg(dwc->dev,
895 "depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
896 ch_info->depcmd_low_addr, ch_info->last_trb_addr,
897 ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
898}
899
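/*
 * Worked example of the ring sizing above (illustrative, num_bufs = 8):
 * IN:  xfer_ring_len = (2 * 8 + 2) * 0x10 = 288 bytes (18 TRBs)
 * OUT: xfer_ring_len = (8 + 2) * 0x10     = 160 bytes (10 TRBs)
 * last_trb_addr holds the lower 16 bits of the final (LINK) TRB's address.
 */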
900/*
901* Perform StartXfer on GSI EP. Stores XferRscIndex.
902*
903* @usb_ep - pointer to usb_ep instance.
904*
905* @return int - 0 on success
906*/
907static int gsi_startxfer_for_ep(struct usb_ep *ep)
908{
909 int ret;
910 struct dwc3_gadget_ep_cmd_params params;
911 u32 cmd;
912 struct dwc3_ep *dep = to_dwc3_ep(ep);
913 struct dwc3 *dwc = dep->dwc;
914
915 memset(&params, 0, sizeof(params));
916 params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
917 params.param0 |= (ep->ep_intr_num << 16);
918 params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
919 &dep->trb_pool[0]));
920 cmd = DWC3_DEPCMD_STARTTRANSFER;
921 cmd |= DWC3_DEPCMD_PARAM(0);
Mayank Rana83ad5822016-08-09 14:17:22 -0700922 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -0700923
924 if (ret < 0)
925 dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
Mayank Rana83ad5822016-08-09 14:17:22 -0700926 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
Mayank Rana511f3b22016-08-02 12:00:11 -0700927 dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
928 return ret;
929}
930
931/*
932* Store Ring Base and Doorbell Address for GSI EP
933* for GSI channel creation.
934*
935* @usb_ep - pointer to usb_ep instance.
936* @dbl_addr - Doorbell address obtained from IPA driver
937*/
938static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
939{
940 struct dwc3_ep *dep = to_dwc3_ep(ep);
941 struct dwc3 *dwc = dep->dwc;
942 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
943 int n = ep->ep_intr_num - 1;
944
945 dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
946 dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
947 dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
948
949 dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
950 dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
951 dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
952 dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
953}
954
955/*
Mayank Rana64d136b2016-11-01 21:01:34 -0700956* Rings Doorbell for GSI Channel
Mayank Rana511f3b22016-08-02 12:00:11 -0700957*
958* @usb_ep - pointer to usb_ep instance.
959* @request - pointer to GSI request. This is used to pass in the
960* address of the GSI doorbell obtained from IPA driver
961*/
Mayank Rana64d136b2016-11-01 21:01:34 -0700962static void gsi_ring_db(struct usb_ep *ep, struct usb_gsi_request *request)
Mayank Rana511f3b22016-08-02 12:00:11 -0700963{
964 void __iomem *gsi_dbl_address_lsb;
965 void __iomem *gsi_dbl_address_msb;
966 dma_addr_t offset;
967 u64 dbl_addr = *((u64 *)request->buf_base_addr);
968 u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
969 u32 dbl_hi_addr = (dbl_addr >> 32);
Mayank Rana511f3b22016-08-02 12:00:11 -0700970 struct dwc3_ep *dep = to_dwc3_ep(ep);
971 struct dwc3 *dwc = dep->dwc;
972 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Rana64d136b2016-11-01 21:01:34 -0700973 int num_trbs = (dep->direction) ? (2 * (request->num_bufs) + 2)
974 : (request->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -0700975
976 gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
977 dbl_lo_addr, sizeof(u32));
978 if (!gsi_dbl_address_lsb)
979 dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
980
981 gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
982 dbl_hi_addr, sizeof(u32));
983 if (!gsi_dbl_address_msb)
984 dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
985
986 offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
Mayank Rana64d136b2016-11-01 21:01:34 -0700987 dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x) for ep:%s\n",
988 &offset, gsi_dbl_address_lsb, dbl_lo_addr, ep->name);
Mayank Rana511f3b22016-08-02 12:00:11 -0700989
990 writel_relaxed(offset, gsi_dbl_address_lsb);
991 writel_relaxed(0, gsi_dbl_address_msb);
992}
993
994/*
995* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
996*
997* @usb_ep - pointer to usb_ep instance.
998* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
999*
1000* @return int - 0 on success
1001*/
1002static int gsi_updatexfer_for_ep(struct usb_ep *ep,
1003 struct usb_gsi_request *request)
1004{
1005 int i;
1006 int ret;
1007 u32 cmd;
1008 int num_trbs = request->num_bufs + 1;
1009 struct dwc3_trb *trb;
1010 struct dwc3_gadget_ep_cmd_params params;
1011 struct dwc3_ep *dep = to_dwc3_ep(ep);
1012 struct dwc3 *dwc = dep->dwc;
1013
1014 for (i = 0; i < num_trbs - 1; i++) {
1015 trb = &dep->trb_pool[i];
1016 trb->ctrl |= DWC3_TRB_CTRL_HWO;
1017 }
1018
1019 memset(&params, 0, sizeof(params));
1020 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1021 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
Mayank Rana83ad5822016-08-09 14:17:22 -07001022 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001023 dep->flags |= DWC3_EP_BUSY;
1024 if (ret < 0)
1025 dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
1026 return ret;
1027}
1028
1029/*
1030* Perform EndXfer on particular GSI EP.
1031*
1032* @usb_ep - pointer to usb_ep instance.
1033*/
1034static void gsi_endxfer_for_ep(struct usb_ep *ep)
1035{
1036 struct dwc3_ep *dep = to_dwc3_ep(ep);
1037 struct dwc3 *dwc = dep->dwc;
1038
1039 dwc3_stop_active_transfer(dwc, dep->number, true);
1040}
1041
1042/*
1043* Allocates and configures TRBs for GSI EPs.
1044*
1045* @usb_ep - pointer to usb_ep instance.
1046* @request - pointer to GSI request.
1047*
1048* @return int - 0 on success
1049*/
1050static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
1051{
1052 int i = 0;
1053 dma_addr_t buffer_addr = req->dma;
1054 struct dwc3_ep *dep = to_dwc3_ep(ep);
1055 struct dwc3 *dwc = dep->dwc;
1056 struct dwc3_trb *trb;
1057 int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
Mayank Rana64d136b2016-11-01 21:01:34 -07001058 : (req->num_bufs + 2);
Mayank Rana511f3b22016-08-02 12:00:11 -07001059
Mayank Ranae0a427e2017-09-18 16:56:26 -07001060 dep->trb_pool = dma_zalloc_coherent(dwc->sysdev,
1061 num_trbs * sizeof(struct dwc3_trb),
1062 &dep->trb_pool_dma, GFP_KERNEL);
1063
1064 if (!dep->trb_pool) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001065 dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
1066 dep->name);
1067 return -ENOMEM;
1068 }
1069
1070 dep->num_trbs = num_trbs;
Mayank Rana511f3b22016-08-02 12:00:11 -07001071 /* IN direction */
1072 if (dep->direction) {
1073 for (i = 0; i < num_trbs ; i++) {
1074 trb = &dep->trb_pool[i];
1075 memset(trb, 0, sizeof(*trb));
1076 /* Set up first n+1 TRBs for ZLPs */
1077 if (i < (req->num_bufs + 1)) {
1078 trb->bpl = 0;
1079 trb->bph = 0;
1080 trb->size = 0;
1081 trb->ctrl = DWC3_TRBCTL_NORMAL
1082 | DWC3_TRB_CTRL_IOC;
1083 continue;
1084 }
1085
1086 /* Setup n TRBs pointing to valid buffers */
1087 trb->bpl = lower_32_bits(buffer_addr);
1088 trb->bph = 0;
1089 trb->size = 0;
1090 trb->ctrl = DWC3_TRBCTL_NORMAL
1091 | DWC3_TRB_CTRL_IOC;
1092 buffer_addr += req->buf_len;
1093
1094 /* Set up the Link TRB at the end */
1095 if (i == (num_trbs - 1)) {
1096 trb->bpl = dwc3_trb_dma_offset(dep,
1097 &dep->trb_pool[0]);
1098 trb->bph = (1 << 23) | (1 << 21)
1099 | (ep->ep_intr_num << 16);
1100 trb->size = 0;
1101 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1102 | DWC3_TRB_CTRL_HWO;
1103 }
1104 }
1105 } else { /* OUT direction */
1106
1107 for (i = 0; i < num_trbs ; i++) {
1108
1109 trb = &dep->trb_pool[i];
1110 memset(trb, 0, sizeof(*trb));
Mayank Rana64d136b2016-11-01 21:01:34 -07001111 /* Set up a LINK TRB at the start of the TRB ring */
1112 if (i == 0) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001113 trb->bpl = dwc3_trb_dma_offset(dep,
Mayank Rana64d136b2016-11-01 21:01:34 -07001114 &dep->trb_pool[1]);
1115 trb->ctrl = DWC3_TRBCTL_LINK_TRB;
1116 } else if (i == (num_trbs - 1)) {
1117 /* Set up the Link TRB at the end */
1118 trb->bpl = dwc3_trb_dma_offset(dep,
1119 &dep->trb_pool[0]);
Mayank Rana511f3b22016-08-02 12:00:11 -07001120 trb->bph = (1 << 23) | (1 << 21)
1121 | (ep->ep_intr_num << 16);
Mayank Rana511f3b22016-08-02 12:00:11 -07001122 trb->ctrl = DWC3_TRBCTL_LINK_TRB
1123 | DWC3_TRB_CTRL_HWO;
Mayank Rana64d136b2016-11-01 21:01:34 -07001124 } else {
1125 trb->bpl = lower_32_bits(buffer_addr);
1126 trb->size = req->buf_len;
1127 buffer_addr += req->buf_len;
1128 trb->ctrl = DWC3_TRBCTL_NORMAL
1129 | DWC3_TRB_CTRL_IOC
1130 | DWC3_TRB_CTRL_CSP
1131 | DWC3_TRB_CTRL_ISP_IMI;
Mayank Rana511f3b22016-08-02 12:00:11 -07001132 }
1133 }
1134 }
Mayank Rana64d136b2016-11-01 21:01:34 -07001135
1136 pr_debug("%s: Initialized TRB Ring for %s\n", __func__, dep->name);
1137 trb = &dep->trb_pool[0];
1138 if (trb) {
1139 for (i = 0; i < num_trbs; i++) {
1140 pr_debug("TRB(%d): ADDRESS:%lx bpl:%x bph:%x size:%x ctrl:%x\n",
1141 i, (unsigned long)dwc3_trb_dma_offset(dep,
1142 &dep->trb_pool[i]), trb->bpl, trb->bph,
1143 trb->size, trb->ctrl);
1144 trb++;
1145 }
1146 }
1147
Mayank Rana511f3b22016-08-02 12:00:11 -07001148 return 0;
1149}
1150
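/*
 * Resulting ring layout from gsi_prepare_trbs() (illustrative summary,
 * n = req->num_bufs):
 * IN:  [ZLP TRB x (n+1)] [data TRB x n] [LINK -> TRB 0]   = 2n + 2 TRBs
 * OUT: [LINK -> TRB 1] [data TRB x n] [LINK -> TRB 0]     = n + 2 TRBs
 */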
1151/*
1152* Frees TRBs for GSI EPs.
1153*
1154* @usb_ep - pointer to usb_ep instance.
1155*
1156*/
1157static void gsi_free_trbs(struct usb_ep *ep)
1158{
1159 struct dwc3_ep *dep = to_dwc3_ep(ep);
Mayank Ranae0a427e2017-09-18 16:56:26 -07001160 struct dwc3 *dwc = dep->dwc;
Mayank Rana511f3b22016-08-02 12:00:11 -07001161
1162 if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
1163 return;
1164
1165 /* Free TRBs and TRB pool for EP */
Mayank Ranae0a427e2017-09-18 16:56:26 -07001166 if (dep->trb_pool_dma) {
1167 dma_free_coherent(dwc->sysdev,
1168 dep->num_trbs * sizeof(struct dwc3_trb),
1169 dep->trb_pool,
1170 dep->trb_pool_dma);
Mayank Rana511f3b22016-08-02 12:00:11 -07001171 dep->trb_pool = NULL;
1172 dep->trb_pool_dma = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07001173 }
1174}
1175/*
1176* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
1177*
1178* @usb_ep - pointer to usb_ep instance.
1179* @request - pointer to GSI request.
1180*/
1181static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
1182{
1183 struct dwc3_ep *dep = to_dwc3_ep(ep);
1184 struct dwc3 *dwc = dep->dwc;
1185 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1186 struct dwc3_gadget_ep_cmd_params params;
1187 const struct usb_endpoint_descriptor *desc = ep->desc;
1188 const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
Mayank Ranaac1200c2017-04-25 13:48:46 -07001189 u32 reg;
1190 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001191
1192 memset(&params, 0x00, sizeof(params));
1193
1194 /* Configure GSI EP */
1195 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
1196 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
1197
1198 /* Burst size is only needed in SuperSpeed mode */
1199 if (dwc->gadget.speed == USB_SPEED_SUPER) {
1200 u32 burst = dep->endpoint.maxburst - 1;
1201
1202 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
1203 }
1204
1205 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
1206 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
1207 | DWC3_DEPCFG_STREAM_EVENT_EN;
1208 dep->stream_capable = true;
1209 }
1210
1211 /* Set EP number */
1212 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
1213
1214 /* Set interrupter number for GSI endpoints */
1215 params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
1216
1217 /* Enable XferInProgress and XferComplete Interrupts */
1218 params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
1219 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
1220 params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
1221 /*
1222 * We must use the lower 16 TX FIFOs even though
1223 * HW might have more
1224 */
1225 /* Remove FIFO Number for GSI EP*/
1226 if (dep->direction)
1227 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
1228
1229 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
1230
1231 dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
1232 params.param0, params.param1, params.param2, dep->name);
1233
Mayank Rana83ad5822016-08-09 14:17:22 -07001234 dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
Mayank Rana511f3b22016-08-02 12:00:11 -07001235
1236 /* Set XferRsc Index for GSI EP */
1237 if (!(dep->flags & DWC3_EP_ENABLED)) {
Mayank Ranaac1200c2017-04-25 13:48:46 -07001238 ret = dwc3_gadget_resize_tx_fifos(dwc, dep);
1239 if (ret)
1240 return;
1241
Mayank Rana511f3b22016-08-02 12:00:11 -07001242 memset(&params, 0x00, sizeof(params));
1243 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
Mayank Rana83ad5822016-08-09 14:17:22 -07001244 dwc3_send_gadget_ep_cmd(dep,
Mayank Rana511f3b22016-08-02 12:00:11 -07001245 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
1246
1247 dep->endpoint.desc = desc;
1248 dep->comp_desc = comp_desc;
1249 dep->type = usb_endpoint_type(desc);
1250 dep->flags |= DWC3_EP_ENABLED;
1251 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1252 reg |= DWC3_DALEPENA_EP(dep->number);
1253 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1254 }
1255
1256}
1257
1258/*
1259* Enables USB wrapper for GSI
1260*
1261* @usb_ep - pointer to usb_ep instance.
1262*/
1263static void gsi_enable(struct usb_ep *ep)
1264{
1265 struct dwc3_ep *dep = to_dwc3_ep(ep);
1266 struct dwc3 *dwc = dep->dwc;
1267 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1268
1269 dwc3_msm_write_reg_field(mdwc->base,
1270 GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
1271 dwc3_msm_write_reg_field(mdwc->base,
1272 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
1273 dwc3_msm_write_reg_field(mdwc->base,
1274 GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
1275 dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
1276 dwc3_msm_write_reg_field(mdwc->base,
1277 GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
1278}
1279
1280/*
1281* Block or allow doorbell towards GSI
1282*
1283* @usb_ep - pointer to usb_ep instance.
1284* @request - pointer to GSI request. In this case num_bufs is used as a bool
1285* to set or clear the doorbell bit
1286*/
1287static void gsi_set_clear_dbell(struct usb_ep *ep,
1288 bool block_db)
1289{
1290
1291 struct dwc3_ep *dep = to_dwc3_ep(ep);
1292 struct dwc3 *dwc = dep->dwc;
1293 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1294
1295 dwc3_msm_write_reg_field(mdwc->base,
1296 GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
1297}
1298
1299/*
1300* Performs necessary checks before stopping GSI channels
1301*
1302* @usb_ep - pointer to usb_ep instance to access DWC3 regs
1303*/
1304static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
1305{
1306 u32 timeout = 1500;
1307 u32 reg = 0;
1308 struct dwc3_ep *dep = to_dwc3_ep(ep);
1309 struct dwc3 *dwc = dep->dwc;
1310 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1311
1312 while (dwc3_msm_read_reg_field(mdwc->base,
1313 GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
1314 if (!timeout--) {
1315 dev_err(mdwc->dev,
1316 "Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
1317 return false;
1318 }
1319 }
1320 /* Check for U3 only if we are not handling Function Suspend */
1321 if (!f_suspend) {
1322 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1323 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
1324 dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
1325 return false;
1326 }
1327 }
1328
1329 return true;
1330}
1331
1332
1333/**
1334* Performs GSI operations or GSI EP related operations.
1335*
1336* @usb_ep - pointer to usb_ep instance.
1337* @op_data - pointer to opcode related data.
1338* @op - GSI related or GSI EP related op code.
1339*
1340* @return int - 0 on success, negative on error.
1341* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
1342*/
1343static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
1344 void *op_data, enum gsi_ep_op op)
1345{
1346 u32 ret = 0;
1347 struct dwc3_ep *dep = to_dwc3_ep(ep);
1348 struct dwc3 *dwc = dep->dwc;
1349 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1350 struct usb_gsi_request *request;
1351 struct gsi_channel_info *ch_info;
1352 bool block_db, f_suspend;
Mayank Rana8432c362016-09-30 18:41:17 -07001353 unsigned long flags;
Mayank Rana511f3b22016-08-02 12:00:11 -07001354
1355 switch (op) {
1356 case GSI_EP_OP_PREPARE_TRBS:
1357 request = (struct usb_gsi_request *)op_data;
1358 dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
1359 ret = gsi_prepare_trbs(ep, request);
1360 break;
1361 case GSI_EP_OP_FREE_TRBS:
1362 dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
1363 gsi_free_trbs(ep);
1364 break;
1365 case GSI_EP_OP_CONFIG:
1366 request = (struct usb_gsi_request *)op_data;
1367 dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001368 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001369 gsi_configure_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001370 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001371 break;
1372 case GSI_EP_OP_STARTXFER:
1373 dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001374 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001375 ret = gsi_startxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001376 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001377 break;
1378 case GSI_EP_OP_GET_XFER_IDX:
1379 dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
1380 ret = gsi_get_xfer_index(ep);
1381 break;
1382 case GSI_EP_OP_STORE_DBL_INFO:
1383 dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
1384 gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
1385 break;
1386 case GSI_EP_OP_ENABLE_GSI:
1387 dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
1388 gsi_enable(ep);
1389 break;
1390 case GSI_EP_OP_GET_CH_INFO:
1391 ch_info = (struct gsi_channel_info *)op_data;
1392 gsi_get_channel_info(ep, ch_info);
1393 break;
Mayank Rana64d136b2016-11-01 21:01:34 -07001394 case GSI_EP_OP_RING_DB:
Mayank Rana511f3b22016-08-02 12:00:11 -07001395 request = (struct usb_gsi_request *)op_data;
Mayank Rana64d136b2016-11-01 21:01:34 -07001396 dbg_print(0xFF, "RING_DB", 0, ep->name);
1397 gsi_ring_db(ep, request);
Mayank Rana511f3b22016-08-02 12:00:11 -07001398 break;
1399 case GSI_EP_OP_UPDATEXFER:
1400 request = (struct usb_gsi_request *)op_data;
1401 dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
Mayank Rana8432c362016-09-30 18:41:17 -07001402 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001403 ret = gsi_updatexfer_for_ep(ep, request);
Mayank Rana8432c362016-09-30 18:41:17 -07001404 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001405 break;
1406 case GSI_EP_OP_ENDXFER:
1407 request = (struct usb_gsi_request *)op_data;
1408 dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
Mayank Rana8432c362016-09-30 18:41:17 -07001409 spin_lock_irqsave(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001410 gsi_endxfer_for_ep(ep);
Mayank Rana8432c362016-09-30 18:41:17 -07001411 spin_unlock_irqrestore(&dwc->lock, flags);
Mayank Rana511f3b22016-08-02 12:00:11 -07001412 break;
1413 case GSI_EP_OP_SET_CLR_BLOCK_DBL:
1414 block_db = *((bool *)op_data);
1415 dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
1416 block_db);
1417 gsi_set_clear_dbell(ep, block_db);
1418 break;
1419 case GSI_EP_OP_CHECK_FOR_SUSPEND:
1420 dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
1421 f_suspend = *((bool *)op_data);
1422 ret = gsi_check_ready_to_suspend(ep, f_suspend);
1423 break;
1424 case GSI_EP_OP_DISABLE:
1425 dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
1426 ret = ep->ops->disable(ep);
1427 break;
1428 default:
1429 dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
1430 }
1431
1432 return ret;
1433}
1434
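#if 0
/*
 * Illustrative op sequence (not part of the original driver, never built):
 * one way a GSI-aware function driver might drive the ops handled above,
 * through the gsi_ep_op hook that msm_ep_config() installs. 'ep' and
 * 'gsi_req' are hypothetical caller-owned objects; error handling is
 * reduced to the minimum needed to show the ordering, not the full
 * production sequence used by the IPA driver.
 */
static int example_gsi_ep_bringup(struct usb_ep *ep,
				  struct usb_gsi_request *gsi_req)
{
	int ret;

	ret = ep->ops->gsi_ep_op(ep, gsi_req, GSI_EP_OP_PREPARE_TRBS);
	if (ret)
		return ret;

	ep->ops->gsi_ep_op(ep, gsi_req, GSI_EP_OP_CONFIG);

	ret = ep->ops->gsi_ep_op(ep, NULL, GSI_EP_OP_STARTXFER);
	if (ret)
		ep->ops->gsi_ep_op(ep, NULL, GSI_EP_OP_FREE_TRBS);

	return ret;
}
#endif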
1435/**
1436 * Configure MSM endpoint.
1437 * This function does specific configurations
1438 * to an endpoint which needs a specific implementation
1439 * in the MSM architecture.
1440 *
1441 * This function should be called by a usb function/class
1442 * layer which needs support from the specific MSM HW
1443 * which wraps the USB3 core (like GSI or DBM specific endpoints).
1444 *
1445 * @ep - a pointer to some usb_ep instance
1446 *
1447 * @return int - 0 on success, negative on error.
1448 */
1449int msm_ep_config(struct usb_ep *ep)
1450{
1451 struct dwc3_ep *dep = to_dwc3_ep(ep);
1452 struct dwc3 *dwc = dep->dwc;
1453 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1454 struct usb_ep_ops *new_ep_ops;
1455
1456
1457 /* Save original ep ops for future restore*/
1458 if (mdwc->original_ep_ops[dep->number]) {
1459 dev_err(mdwc->dev,
1460 "ep [%s,%d] already configured as msm endpoint\n",
1461 ep->name, dep->number);
1462 return -EPERM;
1463 }
1464 mdwc->original_ep_ops[dep->number] = ep->ops;
1465
1466 /* Set new usb ops as we like */
1467 new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
1468 if (!new_ep_ops)
1469 return -ENOMEM;
1470
1471 (*new_ep_ops) = (*ep->ops);
1472 new_ep_ops->queue = dwc3_msm_ep_queue;
1473 new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
1474 ep->ops = new_ep_ops;
1475
1476 /*
1477 * Do HERE more usb endpoint configurations
1478 * which are specific to MSM.
1479 */
1480
1481 return 0;
1482}
1483EXPORT_SYMBOL(msm_ep_config);
1484
1485/**
1486 * Un-configure MSM endpoint.
1487 * Tear down configurations done in the
1488 * msm_ep_config function.
1489 *
1490 * @ep - a pointer to some usb_ep instance
1491 *
1492 * @return int - 0 on success, negative on error.
1493 */
1494int msm_ep_unconfig(struct usb_ep *ep)
1495{
1496 struct dwc3_ep *dep = to_dwc3_ep(ep);
1497 struct dwc3 *dwc = dep->dwc;
1498 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1499 struct usb_ep_ops *old_ep_ops;
1500
1501 /* Restore original ep ops */
1502 if (!mdwc->original_ep_ops[dep->number]) {
1503 dev_err(mdwc->dev,
1504 "ep [%s,%d] was not configured as msm endpoint\n",
1505 ep->name, dep->number);
1506 return -EINVAL;
1507 }
1508 old_ep_ops = (struct usb_ep_ops *)ep->ops;
1509 ep->ops = mdwc->original_ep_ops[dep->number];
1510 mdwc->original_ep_ops[dep->number] = NULL;
1511 kfree(old_ep_ops);
1512
1513 /*
1514 * Do any additional MSM-specific usb endpoint
1515 * un-configuration here.
1516 */
1517
1518 return 0;
1519}
1520EXPORT_SYMBOL(msm_ep_unconfig);
1521#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
1522
1523static void dwc3_resume_work(struct work_struct *w);
1524
1525static void dwc3_restart_usb_work(struct work_struct *w)
1526{
1527 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1528 restart_usb_work);
1529 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1530 unsigned int timeout = 50;
1531
1532 dev_dbg(mdwc->dev, "%s\n", __func__);
1533
1534 if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
1535 dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
1536 return;
1537 }
1538
1539 /* guard against concurrent VBUS handling */
1540 mdwc->in_restart = true;
1541
1542 if (!mdwc->vbus_active) {
1543 dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
1544 dwc->err_evt_seen = false;
1545 mdwc->in_restart = false;
1546 return;
1547 }
1548
Mayank Rana08e41922017-03-02 15:25:48 -08001549 dbg_event(0xFF, "RestartUSB", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07001550 /* Reset active USB connection */
1551 dwc3_resume_work(&mdwc->resume_work);
1552
1553 /* Make sure disconnect is processed before sending connect */
1554 while (--timeout && !pm_runtime_suspended(mdwc->dev))
1555 msleep(20);
1556
1557 if (!timeout) {
1558 dev_dbg(mdwc->dev,
1559 "Not in LPM after disconnect, forcing suspend...\n");
Mayank Rana08e41922017-03-02 15:25:48 -08001560 dbg_event(0xFF, "ReStart:RT SUSP",
1561 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07001562 pm_runtime_suspend(mdwc->dev);
1563 }
1564
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301565 mdwc->in_restart = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07001566 /* Force reconnect only if cable is still connected */
Vijayavardhan Vennapusa5e5680e2016-11-25 11:25:35 +05301567 if (mdwc->vbus_active)
Mayank Rana511f3b22016-08-02 12:00:11 -07001568 dwc3_resume_work(&mdwc->resume_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001569
1570 dwc->err_evt_seen = false;
1571 flush_delayed_work(&mdwc->sm_work);
1572}
1573
Manu Gautam976fdfc2016-08-18 09:27:35 +05301574static int msm_dwc3_usbdev_notify(struct notifier_block *self,
1575 unsigned long action, void *priv)
1576{
1577 struct dwc3_msm *mdwc = container_of(self, struct dwc3_msm, usbdev_nb);
1578 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1579 struct usb_bus *bus = priv;
1580
1581 /* Interested only in recovery when HC dies */
1582 if (action != USB_BUS_DIED)
1583 return 0;
1584
1585 dev_dbg(mdwc->dev, "%s initiate recovery from hc_died\n", __func__);
1586 /* Recovery already under process */
1587 if (mdwc->hc_died)
1588 return 0;
1589
1590 if (bus->controller != &dwc->xhci->dev) {
1591 dev_dbg(mdwc->dev, "%s event for diff HCD\n", __func__);
1592 return 0;
1593 }
1594
1595 mdwc->hc_died = true;
1596 schedule_delayed_work(&mdwc->sm_work, 0);
1597 return 0;
1598}
1599
1600
Mayank Rana511f3b22016-08-02 12:00:11 -07001601/*
1602 * Check whether the DWC3 requires resetting the ep
1603 * after going to Low Power Mode (lpm)
1604 */
1605bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
1606{
1607 struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
1608 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
1609
1610 return dbm_reset_ep_after_lpm(mdwc->dbm);
1611}
1612EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
1613
1614/*
1615 * Config Global Distributed Switch Controller (GDSC)
1616 * to support controller power collapse
1617 */
1618static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
1619{
1620 int ret;
1621
1622 if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
1623 return -EPERM;
1624
1625 if (on) {
1626 ret = regulator_enable(mdwc->dwc3_gdsc);
1627 if (ret) {
1628 dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
1629 return ret;
1630 }
1631 } else {
1632 ret = regulator_disable(mdwc->dwc3_gdsc);
1633 if (ret) {
1634 dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
1635 return ret;
1636 }
1637 }
1638
1639 return ret;
1640}
1641
1642static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
1643{
1644 int ret = 0;
1645
1646 if (assert) {
Mayank Ranad339abe2017-05-31 09:19:49 -07001647 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001648	/* Use an asynchronous block reset to the hardware */
1649 dev_dbg(mdwc->dev, "block_reset ASSERT\n");
1650 clk_disable_unprepare(mdwc->utmi_clk);
1651 clk_disable_unprepare(mdwc->sleep_clk);
1652 clk_disable_unprepare(mdwc->core_clk);
1653 clk_disable_unprepare(mdwc->iface_clk);
Amit Nischal4d278212016-06-06 17:54:34 +05301654 ret = reset_control_assert(mdwc->core_reset);
Mayank Rana511f3b22016-08-02 12:00:11 -07001655 if (ret)
Amit Nischal4d278212016-06-06 17:54:34 +05301656 dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001657 } else {
1658 dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
Amit Nischal4d278212016-06-06 17:54:34 +05301659 ret = reset_control_deassert(mdwc->core_reset);
1660 if (ret)
1661 dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07001662 ndelay(200);
1663 clk_prepare_enable(mdwc->iface_clk);
1664 clk_prepare_enable(mdwc->core_clk);
1665 clk_prepare_enable(mdwc->sleep_clk);
1666 clk_prepare_enable(mdwc->utmi_clk);
Mayank Ranad339abe2017-05-31 09:19:49 -07001667 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07001668 }
1669
1670 return ret;
1671}
1672
1673static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
1674{
1675 u32 guctl, gfladj = 0;
1676
1677 guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
1678 guctl &= ~DWC3_GUCTL_REFCLKPER;
1679
1680 /* GFLADJ register is used starting with revision 2.50a */
1681 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
1682 gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
1683 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1684 gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
1685 gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
1686 gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
1687 }
1688
1689 /* Refer to SNPS Databook Table 6-55 for calculations used */
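	/*
	 * Note (summary, not taken from the databook text itself):
	 * GUCTL.REFCLKPER appears to hold the reference clock period in ns
	 * (~52 ns for 19.2 MHz, ~41 ns for 24 MHz), while the GFLADJ
	 * decrement/FLADJ fields tune the 125 us (micro)frame interval for
	 * the fractional divide.
	 */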
1690 switch (mdwc->utmi_clk_rate) {
1691 case 19200000:
1692 guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
1693 gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1694 gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
1695 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1696 gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1697 break;
1698 case 24000000:
1699 guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
1700 gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
1701 gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
1702 gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
1703 break;
1704 default:
1705 dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
1706 mdwc->utmi_clk_rate);
1707 break;
1708 }
1709
1710 dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
1711 if (gfladj)
1712 dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
1713}
1714
1715/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
1716static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
1717{
1718 if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
1719 /* On older cores set XHCI_REV bit to specify revision 1.0 */
1720 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1721 BIT(2), 1);
1722
1723 /*
1724 * Enable master clock for RAMs to allow BAM to access RAMs when
1725 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
1726 * are seen where RAM clocks get turned OFF in SS mode
1727 */
1728 dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
1729 dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
1730
1731}
1732
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001733static void dwc3_msm_vbus_draw_work(struct work_struct *w)
1734{
1735 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
1736 vbus_draw_work);
1737 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1738
1739 dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
1740}
1741
Mayank Rana511f3b22016-08-02 12:00:11 -07001742static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
1743{
1744 struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
Mayank Ranaf4918d32016-12-15 13:35:55 -08001745 struct dwc3_event_buffer *evt;
Mayank Rana511f3b22016-08-02 12:00:11 -07001746 u32 reg;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001747 int i;
Mayank Rana511f3b22016-08-02 12:00:11 -07001748
1749 switch (event) {
1750 case DWC3_CONTROLLER_ERROR_EVENT:
1751 dev_info(mdwc->dev,
1752 "DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
1753 dwc->irq_cnt);
1754
1755 dwc3_gadget_disable_irq(dwc);
1756
1757 /* prevent core from generating interrupts until recovery */
1758 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
1759 reg |= DWC3_GCTL_CORESOFTRESET;
1760 dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
1761
1762 /* restart USB which performs full reset and reconnect */
1763 schedule_work(&mdwc->restart_usb_work);
1764 break;
1765 case DWC3_CONTROLLER_RESET_EVENT:
1766 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
1767 /* HS & SSPHYs get reset as part of core soft reset */
1768 dwc3_msm_qscratch_reg_init(mdwc);
1769 break;
1770 case DWC3_CONTROLLER_POST_RESET_EVENT:
1771 dev_dbg(mdwc->dev,
1772 "DWC3_CONTROLLER_POST_RESET_EVENT received\n");
1773
1774 /*
 1775		 * The sequence below is used when the controller operates without
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301776		 * an SS PHY and only USB high/full speed is supported.
Mayank Rana511f3b22016-08-02 12:00:11 -07001777 */
Vijayavardhan Vennapusa64d7a522016-10-21 15:02:09 +05301778 if (dwc->maximum_speed == USB_SPEED_HIGH ||
1779 dwc->maximum_speed == USB_SPEED_FULL) {
Mayank Rana511f3b22016-08-02 12:00:11 -07001780 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1781 dwc3_msm_read_reg(mdwc->base,
1782 QSCRATCH_GENERAL_CFG)
1783 | PIPE_UTMI_CLK_DIS);
1784
1785 usleep_range(2, 5);
1786
1787
1788 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1789 dwc3_msm_read_reg(mdwc->base,
1790 QSCRATCH_GENERAL_CFG)
1791 | PIPE_UTMI_CLK_SEL
1792 | PIPE3_PHYSTATUS_SW);
1793
1794 usleep_range(2, 5);
1795
1796 dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
1797 dwc3_msm_read_reg(mdwc->base,
1798 QSCRATCH_GENERAL_CFG)
1799 & ~PIPE_UTMI_CLK_DIS);
1800 }
1801
1802 dwc3_msm_update_ref_clk(mdwc);
1803 dwc->tx_fifo_size = mdwc->tx_fifo_size;
1804 break;
1805 case DWC3_CONTROLLER_CONNDONE_EVENT:
1806 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
1807 /*
 1808		 * Enable the power event interrupt if the DBM indicates L1 exit
 1809		 * via an interrupt
1810 */
1811 if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
1812 dwc3_msm_write_reg_field(mdwc->base,
1813 PWR_EVNT_IRQ_MASK_REG,
1814 PWR_EVNT_LPM_OUT_L1_MASK, 1);
1815
1816 atomic_set(&dwc->in_lpm, 0);
1817 break;
1818 case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
1819 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
1820 if (dwc->enable_bus_suspend) {
1821 mdwc->suspend = dwc->b_suspend;
1822 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
1823 }
1824 break;
1825 case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
1826 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
Jack Pham4b8b4ae2016-08-09 11:36:34 -07001827 schedule_work(&mdwc->vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001828 break;
1829 case DWC3_CONTROLLER_RESTART_USB_SESSION:
1830 dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
Hemant Kumar43874172016-08-25 16:17:48 -07001831 schedule_work(&mdwc->restart_usb_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07001832 break;
Mayank Ranaf4918d32016-12-15 13:35:55 -08001833 case DWC3_GSI_EVT_BUF_ALLOC:
1834 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_ALLOC\n");
1835
1836 if (!mdwc->num_gsi_event_buffers)
1837 break;
1838
1839 mdwc->gsi_ev_buff = devm_kzalloc(dwc->dev,
1840 sizeof(*dwc->ev_buf) * mdwc->num_gsi_event_buffers,
1841 GFP_KERNEL);
1842 if (!mdwc->gsi_ev_buff) {
1843 dev_err(dwc->dev, "can't allocate gsi_ev_buff\n");
1844 break;
1845 }
1846
1847 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1848
1849 evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
1850 if (!evt)
1851 break;
1852 evt->dwc = dwc;
1853 evt->length = DWC3_EVENT_BUFFERS_SIZE;
Mayank Rana0e4c4432017-09-18 16:46:00 -07001854 evt->buf = dma_alloc_coherent(dwc->sysdev,
Mayank Ranaf4918d32016-12-15 13:35:55 -08001855 DWC3_EVENT_BUFFERS_SIZE,
1856 &evt->dma, GFP_KERNEL);
1857 if (!evt->buf) {
1858 dev_err(dwc->dev,
1859 "can't allocate gsi_evt_buf(%d)\n", i);
1860 break;
1861 }
1862 mdwc->gsi_ev_buff[i] = evt;
1863 }
1864 break;
1865 case DWC3_GSI_EVT_BUF_SETUP:
1866 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
1867 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1868 evt = mdwc->gsi_ev_buff[i];
Mayank Rana0eb0db72017-10-03 13:46:32 -07001869 if (!evt)
1870 break;
1871
Mayank Ranaf4918d32016-12-15 13:35:55 -08001872 dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
1873 evt->buf, (unsigned long long) evt->dma,
1874 evt->length);
1875 memset(evt->buf, 0, evt->length);
1876 evt->lpos = 0;
1877 /*
 1878			 * The primary event buffer is programmed via the
 1879			 * DWC3_GEVNT*(0) registers. Hence use DWC3_GEVNT*(i+1)
 1880			 * to program the USB GSI related event buffers in the
 1881			 * DWC3 controller.
1882 */
1883 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)),
1884 lower_32_bits(evt->dma));
1885 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)),
1886 DWC3_GEVNTADRHI_EVNTADRHI_GSI_EN(
1887 DWC3_GEVENT_TYPE_GSI) |
1888 DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX((i+1)));
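			/*
			 * EVNTINTRPTMASK is left set so these GSI event buffers
			 * do not raise core interrupts; the events are presumably
			 * consumed by the GSI hardware instead.
			 */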
1889 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1890 DWC3_GEVNTCOUNT_EVNTINTRPTMASK |
1891 ((evt->length) & 0xffff));
1892 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1893 }
1894 break;
1895 case DWC3_GSI_EVT_BUF_CLEANUP:
1896 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
Mayank Rana0eb0db72017-10-03 13:46:32 -07001897 if (!mdwc->gsi_ev_buff)
1898 break;
1899
Mayank Ranaf4918d32016-12-15 13:35:55 -08001900 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1901 evt = mdwc->gsi_ev_buff[i];
1902 evt->lpos = 0;
1903 /*
 1904			 * The primary event buffer is programmed via the
 1905			 * DWC3_GEVNT*(0) registers. Hence use DWC3_GEVNT*(i+1)
 1906			 * to program the USB GSI related event buffers in the
 1907			 * DWC3 controller.
1908 */
1909 dwc3_writel(dwc->regs, DWC3_GEVNTADRLO((i+1)), 0);
1910 dwc3_writel(dwc->regs, DWC3_GEVNTADRHI((i+1)), 0);
1911 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ((i+1)),
1912 DWC3_GEVNTSIZ_INTMASK |
1913 DWC3_GEVNTSIZ_SIZE((i+1)));
1914 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT((i+1)), 0);
1915 }
1916 break;
1917 case DWC3_GSI_EVT_BUF_FREE:
1918 dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
Mayank Rana0eb0db72017-10-03 13:46:32 -07001919 if (!mdwc->gsi_ev_buff)
1920 break;
1921
Mayank Ranaf4918d32016-12-15 13:35:55 -08001922 for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
1923 evt = mdwc->gsi_ev_buff[i];
1924 if (evt)
Mayank Rana0e4c4432017-09-18 16:46:00 -07001925 dma_free_coherent(dwc->sysdev, evt->length,
Mayank Ranaf4918d32016-12-15 13:35:55 -08001926 evt->buf, evt->dma);
1927 }
1928 break;
Mayank Rana511f3b22016-08-02 12:00:11 -07001929 default:
1930 dev_dbg(mdwc->dev, "unknown dwc3 event\n");
1931 break;
1932 }
1933}
1934
1935static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
1936{
1937 int ret = 0;
1938
1939 if (core_reset) {
1940 ret = dwc3_msm_link_clk_reset(mdwc, 1);
1941 if (ret)
1942 return;
1943
1944 usleep_range(1000, 1200);
1945 ret = dwc3_msm_link_clk_reset(mdwc, 0);
1946 if (ret)
1947 return;
1948
1949 usleep_range(10000, 12000);
1950 }
1951
1952 if (mdwc->dbm) {
1953 /* Reset the DBM */
1954 dbm_soft_reset(mdwc->dbm, 1);
1955 usleep_range(1000, 1200);
1956 dbm_soft_reset(mdwc->dbm, 0);
1957
 1958		/* Enable DBM */
1959 dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
1960 DBM_EN_MASK, 0x1);
1961 dbm_enable(mdwc->dbm);
1962 }
1963}
1964
1965static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
1966{
1967 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
1968 u32 val;
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301969 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07001970
1971 /* Configure AHB2PHY for one wait state read/write */
1972 if (mdwc->ahb2phy_base) {
1973 clk_prepare_enable(mdwc->cfg_ahb_clk);
1974 val = readl_relaxed(mdwc->ahb2phy_base +
1975 PERIPH_SS_AHB2PHY_TOP_CFG);
1976 if (val != ONE_READ_WRITE_WAIT) {
1977 writel_relaxed(ONE_READ_WRITE_WAIT,
1978 mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
1979 /* complete above write before configuring USB PHY. */
1980 mb();
1981 }
1982 clk_disable_unprepare(mdwc->cfg_ahb_clk);
1983 }
1984
1985 if (!mdwc->init) {
Mayank Rana08e41922017-03-02 15:25:48 -08001986 dbg_event(0xFF, "dwc3 init",
1987 atomic_read(&mdwc->dev->power.usage_count));
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05301988 ret = dwc3_core_pre_init(dwc);
1989 if (ret) {
1990 dev_err(mdwc->dev, "dwc3_core_pre_init failed\n");
1991 return;
1992 }
Mayank Rana511f3b22016-08-02 12:00:11 -07001993 mdwc->init = true;
1994 }
1995
1996 dwc3_core_init(dwc);
1997 /* Re-configure event buffers */
1998 dwc3_event_buffers_setup(dwc);
1999}
2000
2001static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
2002{
2003 unsigned long timeout;
2004 u32 reg = 0;
2005
Mayank Rana7e781e72017-12-13 17:27:23 -08002006 if ((mdwc->in_host_mode || mdwc->in_device_mode)
Vijayavardhan Vennapusa8cf91a62016-09-01 12:05:50 +05302007 && dwc3_msm_is_superspeed(mdwc) && !mdwc->in_restart) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002008 if (!atomic_read(&mdwc->in_p3)) {
 2009			dev_err(mdwc->dev, "Not in P3, aborting LPM sequence\n");
2010 return -EBUSY;
2011 }
2012 }
2013
2014 /* Clear previous L2 events */
2015 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2016 PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
2017
2018 /* Prepare HSPHY for suspend */
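	/*
	 * Setting ENBLSLPM/SUSPHY lets the core place the UTMI PHY in its
	 * low-power state once the link settles in L2.
	 */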
2019 reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
2020 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2021 reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
2022
2023 /* Wait for PHY to go into L2 */
2024 timeout = jiffies + msecs_to_jiffies(5);
2025 while (!time_after(jiffies, timeout)) {
2026 reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2027 if (reg & PWR_EVNT_LPM_IN_L2_MASK)
2028 break;
2029 }
2030 if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
2031 dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
2032
2033 /* Clear L2 event bit */
2034 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
2035 PWR_EVNT_LPM_IN_L2_MASK);
2036
2037 return 0;
2038}
2039
Mayank Rana511f3b22016-08-02 12:00:11 -07002040static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
2041{
2042 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2043 int i, num_ports;
2044 u32 reg;
2045
2046 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2047 if (mdwc->in_host_mode) {
2048 reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
2049 num_ports = HCS_MAX_PORTS(reg);
2050 for (i = 0; i < num_ports; i++) {
2051 reg = dwc3_msm_read_reg(mdwc->base,
2052 USB3_PORTSC + i*0x10);
2053 if (reg & PORT_PE) {
2054 if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
2055 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2056 else if (DEV_LOWSPEED(reg))
2057 mdwc->hs_phy->flags |= PHY_LS_MODE;
2058 }
2059 }
2060 } else {
2061 if (dwc->gadget.speed == USB_SPEED_HIGH ||
2062 dwc->gadget.speed == USB_SPEED_FULL)
2063 mdwc->hs_phy->flags |= PHY_HSFS_MODE;
2064 else if (dwc->gadget.speed == USB_SPEED_LOW)
2065 mdwc->hs_phy->flags |= PHY_LS_MODE;
2066 }
2067}
2068
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302069static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
2070 bool perf_mode);
Mayank Rana511f3b22016-08-02 12:00:11 -07002071
Mayank Ranad339abe2017-05-31 09:19:49 -07002072static void configure_usb_wakeup_interrupt(struct dwc3_msm *mdwc,
2073 struct usb_irq *uirq, unsigned int polarity, bool enable)
2074{
2075 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2076
2077 if (uirq && enable && !uirq->enable) {
2078 dbg_event(0xFF, "PDC_IRQ_EN", uirq->irq);
2079 dbg_event(0xFF, "PDC_IRQ_POL", polarity);
2080 /* clear any pending interrupt */
2081 irq_set_irqchip_state(uirq->irq, IRQCHIP_STATE_PENDING, 0);
2082 irq_set_irq_type(uirq->irq, polarity);
2083 enable_irq_wake(uirq->irq);
2084 enable_irq(uirq->irq);
2085 uirq->enable = true;
2086 }
2087
2088 if (uirq && !enable && uirq->enable) {
2089 dbg_event(0xFF, "PDC_IRQ_DIS", uirq->irq);
2090 disable_irq_wake(uirq->irq);
2091 disable_irq_nosync(uirq->irq);
2092 uirq->enable = false;
2093 }
2094}
2095
2096static void enable_usb_pdc_interrupt(struct dwc3_msm *mdwc, bool enable)
2097{
2098 if (!enable)
2099 goto disable_usb_irq;
2100
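	/*
	 * Select the wakeup edge from the idle line state (as inferred from
	 * dwc3_set_phy_speed_flags()): in LS mode D- idles high, in HS/FS
	 * mode D+ idles high, so a falling edge signals resume; with nothing
	 * attached, a rising edge on either line indicates a new connection.
	 */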
2101 if (mdwc->hs_phy->flags & PHY_LS_MODE) {
2102 configure_usb_wakeup_interrupt(mdwc,
2103 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2104 IRQ_TYPE_EDGE_FALLING, enable);
2105 } else if (mdwc->hs_phy->flags & PHY_HSFS_MODE) {
2106 configure_usb_wakeup_interrupt(mdwc,
2107 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2108 IRQ_TYPE_EDGE_FALLING, enable);
2109 } else {
2110 configure_usb_wakeup_interrupt(mdwc,
2111 &mdwc->wakeup_irq[DP_HS_PHY_IRQ],
2112 IRQ_TYPE_EDGE_RISING, true);
2113 configure_usb_wakeup_interrupt(mdwc,
2114 &mdwc->wakeup_irq[DM_HS_PHY_IRQ],
2115 IRQ_TYPE_EDGE_RISING, true);
2116 }
2117
2118 configure_usb_wakeup_interrupt(mdwc,
2119 &mdwc->wakeup_irq[SS_PHY_IRQ],
2120 IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH, enable);
2121 return;
2122
2123disable_usb_irq:
2124 configure_usb_wakeup_interrupt(mdwc,
2125 &mdwc->wakeup_irq[DP_HS_PHY_IRQ], 0, enable);
2126 configure_usb_wakeup_interrupt(mdwc,
2127 &mdwc->wakeup_irq[DM_HS_PHY_IRQ], 0, enable);
2128 configure_usb_wakeup_interrupt(mdwc,
2129 &mdwc->wakeup_irq[SS_PHY_IRQ], 0, enable);
2130}
2131
2132static void configure_nonpdc_usb_interrupt(struct dwc3_msm *mdwc,
2133 struct usb_irq *uirq, bool enable)
2134{
2135 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2136
2137 if (uirq && enable && !uirq->enable) {
2138 dbg_event(0xFF, "IRQ_EN", uirq->irq);
2139 enable_irq_wake(uirq->irq);
2140 enable_irq(uirq->irq);
2141 uirq->enable = true;
2142 }
2143
2144 if (uirq && !enable && uirq->enable) {
2145 dbg_event(0xFF, "IRQ_DIS", uirq->irq);
2146 disable_irq_wake(uirq->irq);
2147 disable_irq_nosync(uirq->irq);
 2148		uirq->enable = false;
2149 }
2150}
2151
Mayank Rana511f3b22016-08-02 12:00:11 -07002152static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
2153{
Mayank Rana83ad5822016-08-09 14:17:22 -07002154 int ret;
Mayank Rana511f3b22016-08-02 12:00:11 -07002155 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana83ad5822016-08-09 14:17:22 -07002156 struct dwc3_event_buffer *evt;
Mayank Ranad339abe2017-05-31 09:19:49 -07002157 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002158
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302159 mutex_lock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002160 if (atomic_read(&dwc->in_lpm)) {
2161 dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302162 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002163 return 0;
2164 }
2165
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302166 cancel_delayed_work_sync(&mdwc->perf_vote_work);
2167 msm_dwc3_perf_vote_update(mdwc, false);
2168
Mayank Rana511f3b22016-08-02 12:00:11 -07002169 if (!mdwc->in_host_mode) {
Mayank Rana83ad5822016-08-09 14:17:22 -07002170 evt = dwc->ev_buf;
2171 if ((evt->flags & DWC3_EVENT_PENDING)) {
2172 dev_dbg(mdwc->dev,
Mayank Rana511f3b22016-08-02 12:00:11 -07002173 "%s: %d device events pending, abort suspend\n",
2174 __func__, evt->count / 4);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302175 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana83ad5822016-08-09 14:17:22 -07002176 return -EBUSY;
Mayank Rana511f3b22016-08-02 12:00:11 -07002177 }
2178 }
2179
2180 if (!mdwc->vbus_active && dwc->is_drd &&
2181 mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
2182 /*
2183 * In some cases, the pm_runtime_suspend may be called by
2184 * usb_bam when there is pending lpm flag. However, if this is
2185 * done when cable was disconnected and otg state has not
2186 * yet changed to IDLE, then it means OTG state machine
2187 * is running and we race against it. So cancel LPM for now,
2188 * and OTG state machine will go for LPM later, after completing
2189 * transition to IDLE state.
2190 */
2191 dev_dbg(mdwc->dev,
2192 "%s: cable disconnected while not in idle otg state\n",
2193 __func__);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302194 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002195 return -EBUSY;
2196 }
2197
2198 /*
 2199	 * If the device is not in the CONFIGURED state, check the
 2200	 * controller's L2 state and break the LPM sequence. This check
 2201	 * applies to the device bus suspend case.
2202 */
2203 if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
2204 (dwc->gadget.state != USB_STATE_CONFIGURED)) {
2205 pr_err("%s(): Trying to go in LPM with state:%d\n",
2206 __func__, dwc->gadget.state);
2207 pr_err("%s(): LPM is not performed.\n", __func__);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302208 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002209 return -EBUSY;
2210 }
2211
2212 ret = dwc3_msm_prepare_suspend(mdwc);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302213 if (ret) {
2214 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002215 return ret;
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302216 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002217
Mayank Rana511f3b22016-08-02 12:00:11 -07002218 /* Disable core irq */
2219 if (dwc->irq)
2220 disable_irq(dwc->irq);
2221
Mayank Ranaf616a7f2017-03-20 16:10:39 -07002222 if (work_busy(&dwc->bh_work))
2223 dbg_event(0xFF, "pend evt", 0);
2224
Mayank Rana511f3b22016-08-02 12:00:11 -07002225 /* disable power event irq, hs and ss phy irq is used as wake up src */
Mayank Ranad339abe2017-05-31 09:19:49 -07002226 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07002227
2228 dwc3_set_phy_speed_flags(mdwc);
2229 /* Suspend HS PHY */
2230 usb_phy_set_suspend(mdwc->hs_phy, 1);
2231
2232 /* Suspend SS PHY */
Mayank Rana17f67e32017-08-15 10:41:28 -07002233 if (dwc->maximum_speed == USB_SPEED_SUPER) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002234 /* indicate phy about SS mode */
2235 if (dwc3_msm_is_superspeed(mdwc))
2236 mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
2237 usb_phy_set_suspend(mdwc->ss_phy, 1);
2238 mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
2239 }
2240
2241 /* make sure above writes are completed before turning off clocks */
2242 wmb();
2243
2244 /* Disable clocks */
2245 if (mdwc->bus_aggr_clk)
2246 clk_disable_unprepare(mdwc->bus_aggr_clk);
2247 clk_disable_unprepare(mdwc->utmi_clk);
2248
Hemant Kumar633dc332016-08-10 13:41:05 -07002249 /* Memory core: OFF, Memory periphery: OFF */
2250 if (!mdwc->in_host_mode && !mdwc->vbus_active) {
2251 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_MEM);
2252 clk_set_flags(mdwc->core_clk, CLKFLAG_NORETAIN_PERIPH);
2253 }
2254
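	/* Drop core_clk to the 19.2 MHz XO rate before gating it */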
Mayank Rana511f3b22016-08-02 12:00:11 -07002255 clk_set_rate(mdwc->core_clk, 19200000);
2256 clk_disable_unprepare(mdwc->core_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302257 if (mdwc->noc_aggr_clk)
2258 clk_disable_unprepare(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07002259 /*
2260 * Disable iface_clk only after core_clk as core_clk has FSM
 2261	 * dependency on iface_clk. Hence iface_clk should be turned off
2262 * after core_clk is turned off.
2263 */
2264 clk_disable_unprepare(mdwc->iface_clk);
2265 /* USB PHY no more requires TCXO */
2266 clk_disable_unprepare(mdwc->xo_clk);
2267
2268 /* Perform controller power collapse */
Mayank Rana7e781e72017-12-13 17:27:23 -08002269 if (!mdwc->in_host_mode && (!mdwc->in_device_mode ||
2270 mdwc->in_restart)) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002271 mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
2272 dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
2273 dwc3_msm_config_gdsc(mdwc, 0);
2274 clk_disable_unprepare(mdwc->sleep_clk);
Jack Phambbe27962017-03-23 18:42:26 -07002275
Jack Pham9faa51df2017-04-03 18:13:40 -07002276 if (mdwc->iommu_map) {
Jack Phambbe27962017-03-23 18:42:26 -07002277 arm_iommu_detach_device(mdwc->dev);
Jack Pham9faa51df2017-04-03 18:13:40 -07002278 dev_dbg(mdwc->dev, "IOMMU detached\n");
2279 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002280 }
2281
2282 /* Remove bus voting */
2283 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002284 dbg_event(0xFF, "bus_devote_start", 0);
2285 ret = msm_bus_scale_client_update_request(
2286 mdwc->bus_perf_client, 0);
2287 dbg_event(0xFF, "bus_devote_finish", 0);
2288 if (ret)
2289 dev_err(mdwc->dev, "bus bw unvoting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002290 }
2291
2292 /*
 2293	 * Release the wakeup source with a timeout to defer system suspend,
 2294	 * handling the case where SUSPEND and DISCONNECT events are received
 2295	 * on USB cable disconnect.
2296 */
2297 if (mdwc->lpm_to_suspend_delay) {
2298 dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
2299 mdwc->lpm_to_suspend_delay);
2300 pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
2301 } else {
2302 pm_relax(mdwc->dev);
2303 }
2304
2305 atomic_set(&dwc->in_lpm, 1);
2306
2307 /*
 2308	 * With DCP or during cable disconnect, we don't require wakeup
2309 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
2310 * case of host bus suspend and device bus suspend.
2311 */
Mayank Rana7e781e72017-12-13 17:27:23 -08002312 if (mdwc->in_device_mode || mdwc->in_host_mode) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002313 if (mdwc->use_pdc_interrupts) {
2314 enable_usb_pdc_interrupt(mdwc, true);
2315 } else {
2316 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2317 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
2318 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2319 configure_nonpdc_usb_interrupt(mdwc, uirq, true);
Mayank Rana511f3b22016-08-02 12:00:11 -07002320 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002321 mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2322 }
2323
2324 dev_info(mdwc->dev, "DWC3 in low power mode\n");
Mayank Rana7e781e72017-12-13 17:27:23 -08002325 dbg_event(0xFF, "Ctl Sus", atomic_read(&dwc->in_lpm));
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302326 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002327 return 0;
2328}
2329
2330static int dwc3_msm_resume(struct dwc3_msm *mdwc)
2331{
2332 int ret;
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002333 long core_clk_rate;
Mayank Rana511f3b22016-08-02 12:00:11 -07002334 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Ranad339abe2017-05-31 09:19:49 -07002335 struct usb_irq *uirq;
Mayank Rana511f3b22016-08-02 12:00:11 -07002336
2337 dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
2338
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302339 mutex_lock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002340 if (!atomic_read(&dwc->in_lpm)) {
2341 dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302342 mutex_unlock(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07002343 return 0;
2344 }
2345
2346 pm_stay_awake(mdwc->dev);
2347
2348 /* Enable bus voting */
2349 if (mdwc->bus_perf_client) {
Mayank Ranaca9f3182017-04-13 17:44:14 -07002350 dbg_event(0xFF, "bus_vote_start", 1);
2351 ret = msm_bus_scale_client_update_request(
2352 mdwc->bus_perf_client, 1);
2353 dbg_event(0xFF, "bus_vote_finish", 1);
2354 if (ret)
2355 dev_err(mdwc->dev, "bus bw voting failed %d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002356 }
2357
2358 /* Vote for TCXO while waking up USB HSPHY */
2359 ret = clk_prepare_enable(mdwc->xo_clk);
2360 if (ret)
 2361		dev_err(mdwc->dev, "%s failed to vote TCXO buffer %d\n",
2362 __func__, ret);
2363
2364 /* Restore controller power collapse */
2365 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2366 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2367 dwc3_msm_config_gdsc(mdwc, 1);
Amit Nischal4d278212016-06-06 17:54:34 +05302368 ret = reset_control_assert(mdwc->core_reset);
2369 if (ret)
2370 dev_err(mdwc->dev, "%s:core_reset assert failed\n",
2371 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002372 /* HW requires a short delay for reset to take place properly */
2373 usleep_range(1000, 1200);
Amit Nischal4d278212016-06-06 17:54:34 +05302374 ret = reset_control_deassert(mdwc->core_reset);
2375 if (ret)
2376 dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
2377 __func__);
Mayank Rana511f3b22016-08-02 12:00:11 -07002378 clk_prepare_enable(mdwc->sleep_clk);
2379 }
2380
2381 /*
2382 * Enable clocks
 2383	 * Turn on iface_clk before core_clk due to the FSM dependency.
2384 */
2385 clk_prepare_enable(mdwc->iface_clk);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302386 if (mdwc->noc_aggr_clk)
2387 clk_prepare_enable(mdwc->noc_aggr_clk);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002388
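	/*
	 * Run core_clk at the lower HS rate when only a high-speed device sits
	 * behind the root hub; the full rate appears to be needed only for
	 * SuperSpeed operation.
	 */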
2389 core_clk_rate = mdwc->core_clk_rate;
2390 if (mdwc->in_host_mode && mdwc->max_rh_port_speed == USB_SPEED_HIGH) {
2391 core_clk_rate = mdwc->core_clk_rate_hs;
2392 dev_dbg(mdwc->dev, "%s: set hs core clk rate %ld\n", __func__,
2393 core_clk_rate);
2394 }
2395
2396 clk_set_rate(mdwc->core_clk, core_clk_rate);
Mayank Rana511f3b22016-08-02 12:00:11 -07002397 clk_prepare_enable(mdwc->core_clk);
Hemant Kumar5fa38932016-10-27 11:58:37 -07002398
2399 /* set Memory core: ON, Memory periphery: ON */
2400 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_MEM);
2401 clk_set_flags(mdwc->core_clk, CLKFLAG_RETAIN_PERIPH);
2402
Mayank Rana511f3b22016-08-02 12:00:11 -07002403 clk_prepare_enable(mdwc->utmi_clk);
2404 if (mdwc->bus_aggr_clk)
2405 clk_prepare_enable(mdwc->bus_aggr_clk);
2406
2407 /* Resume SS PHY */
Hemant Kumarde1df692016-04-26 19:36:48 -07002408 if (dwc->maximum_speed == USB_SPEED_SUPER &&
2409 mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
Mayank Rana511f3b22016-08-02 12:00:11 -07002410 mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
2411 if (mdwc->typec_orientation == ORIENTATION_CC1)
2412 mdwc->ss_phy->flags |= PHY_LANE_A;
2413 if (mdwc->typec_orientation == ORIENTATION_CC2)
2414 mdwc->ss_phy->flags |= PHY_LANE_B;
2415 usb_phy_set_suspend(mdwc->ss_phy, 0);
2416 mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
2417 mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
2418 }
2419
2420 mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
2421 /* Resume HS PHY */
2422 usb_phy_set_suspend(mdwc->hs_phy, 0);
2423
2424 /* Recover from controller power collapse */
2425 if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
2426 u32 tmp;
2427
Jack Pham9faa51df2017-04-03 18:13:40 -07002428 if (mdwc->iommu_map) {
2429 ret = arm_iommu_attach_device(mdwc->dev,
2430 mdwc->iommu_map);
2431 if (ret)
2432 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
2433 ret);
2434 else
2435 dev_dbg(mdwc->dev, "attached to IOMMU\n");
2436 }
2437
Mayank Rana511f3b22016-08-02 12:00:11 -07002438 dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
2439
2440 dwc3_msm_power_collapse_por(mdwc);
2441
2442 /* Get initial P3 status and enable IN_P3 event */
2443 tmp = dwc3_msm_read_reg_field(mdwc->base,
2444 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2445 atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
2446 dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
2447 PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
2448
2449 mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
2450 }
2451
2452 atomic_set(&dwc->in_lpm, 0);
2453
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302454 /* enable power evt irq for IN P3 detection */
Mayank Ranad339abe2017-05-31 09:19:49 -07002455 enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Vijayavardhan Vennapusa6a4c1d92016-12-08 13:06:26 +05302456
Mayank Rana511f3b22016-08-02 12:00:11 -07002457 /* Disable HSPHY auto suspend */
2458 dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
2459 dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
2460 ~(DWC3_GUSB2PHYCFG_ENBLSLPM |
2461 DWC3_GUSB2PHYCFG_SUSPHY));
2462
2463 /* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
2464 if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
Mayank Ranad339abe2017-05-31 09:19:49 -07002465 if (mdwc->use_pdc_interrupts) {
2466 enable_usb_pdc_interrupt(mdwc, false);
2467 } else {
2468 uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
2469 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
2470 uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
2471 configure_nonpdc_usb_interrupt(mdwc, uirq, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07002472 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002473 mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
2474 }
2475
2476 dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
2477
Mayank Rana511f3b22016-08-02 12:00:11 -07002478 /* Enable core irq */
2479 if (dwc->irq)
2480 enable_irq(dwc->irq);
2481
2482 /*
2483 * Handle other power events that could not have been handled during
2484 * Low Power Mode
2485 */
2486 dwc3_pwr_event_handler(mdwc);
2487
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05302488 if (pm_qos_request_active(&mdwc->pm_qos_req_dma))
2489 schedule_delayed_work(&mdwc->perf_vote_work,
2490 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
2491
Mayank Rana08e41922017-03-02 15:25:48 -08002492 dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm));
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05302493 mutex_unlock(&mdwc->suspend_resume_mutex);
2494
Mayank Rana511f3b22016-08-02 12:00:11 -07002495 return 0;
2496}
2497
2498/**
2499 * dwc3_ext_event_notify - callback to handle events from external transceiver
2500 *
2502 */
2503static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
2504{
2505 /* Flush processing any pending events before handling new ones */
2506 flush_delayed_work(&mdwc->sm_work);
2507
2508 if (mdwc->id_state == DWC3_ID_FLOAT) {
2509 dev_dbg(mdwc->dev, "XCVR: ID set\n");
2510 set_bit(ID, &mdwc->inputs);
2511 } else {
2512 dev_dbg(mdwc->dev, "XCVR: ID clear\n");
2513 clear_bit(ID, &mdwc->inputs);
2514 }
2515
2516 if (mdwc->vbus_active && !mdwc->in_restart) {
2517 dev_dbg(mdwc->dev, "XCVR: BSV set\n");
2518 set_bit(B_SESS_VLD, &mdwc->inputs);
2519 } else {
2520 dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
2521 clear_bit(B_SESS_VLD, &mdwc->inputs);
2522 }
2523
2524 if (mdwc->suspend) {
2525 dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
2526 set_bit(B_SUSPEND, &mdwc->inputs);
2527 } else {
2528 dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
2529 clear_bit(B_SUSPEND, &mdwc->inputs);
2530 }
2531
2532 schedule_delayed_work(&mdwc->sm_work, 0);
2533}
2534
2535static void dwc3_resume_work(struct work_struct *w)
2536{
2537 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
Mayank Rana08e41922017-03-02 15:25:48 -08002538 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Jack Pham4e9dff72017-04-04 18:05:53 -07002539 union extcon_property_value val;
2540 unsigned int extcon_id;
2541 struct extcon_dev *edev = NULL;
2542 int ret = 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07002543
2544 dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
2545
Jack Pham4e9dff72017-04-04 18:05:53 -07002546 if (mdwc->vbus_active) {
2547 edev = mdwc->extcon_vbus;
2548 extcon_id = EXTCON_USB;
2549 } else if (mdwc->id_state == DWC3_ID_GROUND) {
2550 edev = mdwc->extcon_id;
2551 extcon_id = EXTCON_USB_HOST;
2552 }
2553
2554 /* Check speed and Type-C polarity values in order to configure PHY */
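	/* EXTCON_PROP_USB_SS == 0 implies a USB 2.0-only connection, so cap at high speed */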
2555 if (edev && extcon_get_state(edev, extcon_id)) {
2556 ret = extcon_get_property(edev, extcon_id,
2557 EXTCON_PROP_USB_SS, &val);
2558
2559 /* Use default dwc->maximum_speed if speed isn't reported */
2560 if (!ret)
2561 dwc->maximum_speed = (val.intval == 0) ?
2562 USB_SPEED_HIGH : USB_SPEED_SUPER;
2563
2564 if (dwc->maximum_speed > dwc->max_hw_supp_speed)
2565 dwc->maximum_speed = dwc->max_hw_supp_speed;
2566
Mayank Ranaf70d8212017-06-12 14:02:07 -07002567 if (override_usb_speed &&
2568 is_valid_usb_speed(dwc, override_usb_speed)) {
2569 dwc->maximum_speed = override_usb_speed;
2570 dbg_event(0xFF, "override_speed", override_usb_speed);
2571 }
2572
Jack Pham4e9dff72017-04-04 18:05:53 -07002573 dbg_event(0xFF, "speed", dwc->maximum_speed);
2574
2575 ret = extcon_get_property(edev, extcon_id,
2576 EXTCON_PROP_USB_TYPEC_POLARITY, &val);
2577 if (ret)
2578 mdwc->typec_orientation = ORIENTATION_NONE;
2579 else
2580 mdwc->typec_orientation = val.intval ?
2581 ORIENTATION_CC2 : ORIENTATION_CC1;
2582
2583 dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
2584 }
2585
Mayank Rana511f3b22016-08-02 12:00:11 -07002586 /*
2587 * exit LPM first to meet resume timeline from device side.
2588 * resume_pending flag would prevent calling
2589 * dwc3_msm_resume() in case we are here due to system
2590 * wide resume without usb cable connected. This flag is set
2591 * only in case of power event irq in lpm.
2592 */
2593 if (mdwc->resume_pending) {
2594 dwc3_msm_resume(mdwc);
2595 mdwc->resume_pending = false;
2596 }
2597
Mayank Rana08e41922017-03-02 15:25:48 -08002598 if (atomic_read(&mdwc->pm_suspended)) {
2599 dbg_event(0xFF, "RWrk PMSus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07002600 /* let pm resume kick in resume work later */
2601 return;
Mayank Rana08e41922017-03-02 15:25:48 -08002602 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002603 dwc3_ext_event_notify(mdwc);
2604}
2605
2606static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
2607{
2608 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2609 u32 irq_stat, irq_clear = 0;
2610
2611 irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
2612 dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
2613
2614 /* Check for P3 events */
2615 if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
2616 (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
2617 /* Can't tell if entered or exit P3, so check LINKSTATE */
2618 u32 ls = dwc3_msm_read_reg_field(mdwc->base,
2619 DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
2620 dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
2621 atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
2622
2623 irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2624 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2625 irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
2626 PWR_EVNT_POWERDOWN_IN_P3_MASK);
2627 } else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
2628 atomic_set(&mdwc->in_p3, 0);
2629 irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2630 irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
2631 } else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
2632 atomic_set(&mdwc->in_p3, 1);
2633 irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
2634 irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
2635 }
2636
2637 /* Clear L2 exit */
2638 if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
2639 irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
 2640		irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
2641 }
2642
2643 /* Handle exit from L1 events */
2644 if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
2645 dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
2646 __func__);
2647 if (usb_gadget_wakeup(&dwc->gadget))
2648 dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
2649 __func__);
2650 irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
2651 irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
2652 }
2653
2654 /* Unhandled events */
2655 if (irq_stat)
2656 dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
2657 __func__, irq_stat);
2658
2659 dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
2660}
2661
2662static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
2663{
2664 struct dwc3_msm *mdwc = _mdwc;
2665 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2666
2667 dev_dbg(mdwc->dev, "%s\n", __func__);
2668
2669 if (atomic_read(&dwc->in_lpm))
2670 dwc3_resume_work(&mdwc->resume_work);
2671 else
2672 dwc3_pwr_event_handler(mdwc);
2673
Mayank Rana08e41922017-03-02 15:25:48 -08002674 dbg_event(0xFF, "PWR IRQ", atomic_read(&dwc->in_lpm));
Mayank Rana511f3b22016-08-02 12:00:11 -07002675 return IRQ_HANDLED;
2676}
2677
2678static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
2679{
2680 struct dwc3_msm *mdwc = data;
2681 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2682
2683 dwc->t_pwr_evt_irq = ktime_get();
2684 dev_dbg(mdwc->dev, "%s received\n", __func__);
2685 /*
 2686	 * When in Low Power Mode, we can't read PWR_EVNT_IRQ_STAT_REG to ascertain
 2687	 * which interrupts have been triggered, as the clocks are disabled.
 2688	 * Resume the controller by waking up the pwr event irq thread. After re-enabling
2689 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
2690 * all other power events.
2691 */
2692 if (atomic_read(&dwc->in_lpm)) {
2693 /* set this to call dwc3_msm_resume() */
2694 mdwc->resume_pending = true;
2695 return IRQ_WAKE_THREAD;
2696 }
2697
2698 dwc3_pwr_event_handler(mdwc);
2699 return IRQ_HANDLED;
2700}
2701
2702static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
2703 unsigned long action, void *hcpu)
2704{
2705 uint32_t cpu = (uintptr_t)hcpu;
2706 struct dwc3_msm *mdwc =
2707 container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
2708
2709 if (cpu == cpu_to_affin && action == CPU_ONLINE) {
2710 pr_debug("%s: cpu online:%u irq:%d\n", __func__,
2711 cpu_to_affin, mdwc->irq_to_affin);
2712 irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
2713 }
2714
2715 return NOTIFY_OK;
2716}
2717
2718static void dwc3_otg_sm_work(struct work_struct *w);
2719
2720static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
2721{
2722 int ret;
2723
2724 mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
2725 if (IS_ERR(mdwc->dwc3_gdsc))
2726 mdwc->dwc3_gdsc = NULL;
2727
2728 mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
2729 if (IS_ERR(mdwc->xo_clk)) {
2730 dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
2731 __func__);
2732 ret = PTR_ERR(mdwc->xo_clk);
2733 return ret;
2734 }
2735 clk_set_rate(mdwc->xo_clk, 19200000);
2736
2737 mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
2738 if (IS_ERR(mdwc->iface_clk)) {
2739 dev_err(mdwc->dev, "failed to get iface_clk\n");
2740 ret = PTR_ERR(mdwc->iface_clk);
2741 return ret;
2742 }
2743
2744 /*
2745 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
 2746	 * run at 125 MHz in SSUSB mode and >60 MHz in HSUSB mode.
 2747	 * On newer platforms it can run at 150 MHz as well.
2748 */
2749 mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
2750 if (IS_ERR(mdwc->core_clk)) {
2751 dev_err(mdwc->dev, "failed to get core_clk\n");
2752 ret = PTR_ERR(mdwc->core_clk);
2753 return ret;
2754 }
2755
Amit Nischal4d278212016-06-06 17:54:34 +05302756 mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
2757 if (IS_ERR(mdwc->core_reset)) {
2758 dev_err(mdwc->dev, "failed to get core_reset\n");
2759 return PTR_ERR(mdwc->core_reset);
2760 }
2761
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302762 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate",
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302763 (u32 *)&mdwc->core_clk_rate)) {
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302764 dev_err(mdwc->dev, "USB core-clk-rate is not present\n");
2765 return -EINVAL;
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302766 }
2767
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302768 mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk,
Vijayavardhan Vennapusa3e668f32016-01-08 15:58:35 +05302769 mdwc->core_clk_rate);
Vijayavardhan Vennapusa8e6a11e2016-12-06 12:04:21 +05302770 dev_dbg(mdwc->dev, "USB core frequency = %ld\n",
2771 mdwc->core_clk_rate);
2772 ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
2773 if (ret)
2774 dev_err(mdwc->dev, "fail to set core_clk freq:%d\n", ret);
Mayank Rana511f3b22016-08-02 12:00:11 -07002775
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08002776 if (of_property_read_u32(mdwc->dev->of_node, "qcom,core-clk-rate-hs",
2777 (u32 *)&mdwc->core_clk_rate_hs)) {
2778 dev_dbg(mdwc->dev, "USB core-clk-rate-hs is not present\n");
2779 mdwc->core_clk_rate_hs = mdwc->core_clk_rate;
2780 }
2781
Mayank Rana511f3b22016-08-02 12:00:11 -07002782 mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
2783 if (IS_ERR(mdwc->sleep_clk)) {
2784 dev_err(mdwc->dev, "failed to get sleep_clk\n");
2785 ret = PTR_ERR(mdwc->sleep_clk);
2786 return ret;
2787 }
2788
2789 clk_set_rate(mdwc->sleep_clk, 32000);
2790 mdwc->utmi_clk_rate = 19200000;
2791 mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
2792 if (IS_ERR(mdwc->utmi_clk)) {
2793 dev_err(mdwc->dev, "failed to get utmi_clk\n");
2794 ret = PTR_ERR(mdwc->utmi_clk);
2795 return ret;
2796 }
2797
2798 clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
2799 mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
2800 if (IS_ERR(mdwc->bus_aggr_clk))
2801 mdwc->bus_aggr_clk = NULL;
2802
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05302803 mdwc->noc_aggr_clk = devm_clk_get(mdwc->dev, "noc_aggr_clk");
2804 if (IS_ERR(mdwc->noc_aggr_clk))
2805 mdwc->noc_aggr_clk = NULL;
2806
Mayank Rana511f3b22016-08-02 12:00:11 -07002807 if (of_property_match_string(mdwc->dev->of_node,
2808 "clock-names", "cfg_ahb_clk") >= 0) {
2809 mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
2810 if (IS_ERR(mdwc->cfg_ahb_clk)) {
2811 ret = PTR_ERR(mdwc->cfg_ahb_clk);
2812 mdwc->cfg_ahb_clk = NULL;
2813 if (ret != -EPROBE_DEFER)
2814 dev_err(mdwc->dev,
2815 "failed to get cfg_ahb_clk ret %d\n",
2816 ret);
2817 return ret;
2818 }
2819 }
2820
2821 return 0;
2822}
2823
2824static int dwc3_msm_id_notifier(struct notifier_block *nb,
2825 unsigned long event, void *ptr)
2826{
2827 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
Hemant Kumarde1df692016-04-26 19:36:48 -07002828 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002829 enum dwc3_id_state id;
Mayank Rana511f3b22016-08-02 12:00:11 -07002830
2831 id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
2832
2833 dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
2834
Mayank Rana511f3b22016-08-02 12:00:11 -07002835 if (mdwc->id_state != id) {
2836 mdwc->id_state = id;
Mayank Rana08e41922017-03-02 15:25:48 -08002837 dbg_event(0xFF, "id_state", mdwc->id_state);
Mayank Rana511f3b22016-08-02 12:00:11 -07002838 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2839 }
2840
Mayank Rana511f3b22016-08-02 12:00:11 -07002841 return NOTIFY_DONE;
2842}
2843
Hemant Kumar006fae42017-07-12 18:11:25 -07002844
2845static void check_for_sdp_connection(struct work_struct *w)
2846{
Hemant Kumar006fae42017-07-12 18:11:25 -07002847 struct dwc3_msm *mdwc =
2848 container_of(w, struct dwc3_msm, sdp_check.work);
2849 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
2850
2851 if (!mdwc->vbus_active)
2852 return;
2853
2854 /* floating D+/D- lines detected */
2855 if (dwc->gadget.state < USB_STATE_DEFAULT &&
2856 dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
Hemant Kumar006fae42017-07-12 18:11:25 -07002857 mdwc->vbus_active = 0;
2858 dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
2859 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
2860 }
2861}
2862
Mayank Rana511f3b22016-08-02 12:00:11 -07002863static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
2864 unsigned long event, void *ptr)
2865{
2866 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
2867 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07002868
2869 dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
2870
2871 if (mdwc->vbus_active == event)
2872 return NOTIFY_DONE;
2873
Mayank Rana511f3b22016-08-02 12:00:11 -07002874 mdwc->vbus_active = event;
Mayank Rana83ad5822016-08-09 14:17:22 -07002875 if (dwc->is_drd && !mdwc->in_restart)
Mayank Rana511f3b22016-08-02 12:00:11 -07002876 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002877
Mayank Rana511f3b22016-08-02 12:00:11 -07002878 return NOTIFY_DONE;
2879}
Jack Pham4e9dff72017-04-04 18:05:53 -07002880
Mayank Rana51958172017-02-28 14:49:21 -08002881/*
Mayank Rana25d02862017-09-12 14:49:41 -07002882 * Handle EUD based soft detach/attach event
Mayank Rana51958172017-02-28 14:49:21 -08002883 *
2884 * @nb - notifier handler
2885 * @event - event information i.e. soft detach/attach event
2886 * @ptr - extcon_dev pointer
2887 *
2888 * @return int - NOTIFY_DONE always due to EUD
2889 */
2890static int dwc3_msm_eud_notifier(struct notifier_block *nb,
2891 unsigned long event, void *ptr)
2892{
2893 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, eud_event_nb);
2894 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana51958172017-02-28 14:49:21 -08002895
2896 dbg_event(0xFF, "EUD_NB", event);
2897 dev_dbg(mdwc->dev, "eud:%ld event received\n", event);
2898 if (mdwc->vbus_active == event)
2899 return NOTIFY_DONE;
2900
Mayank Rana51958172017-02-28 14:49:21 -08002901 mdwc->vbus_active = event;
2902 if (dwc->is_drd && !mdwc->in_restart)
2903 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
Jack Pham4e9dff72017-04-04 18:05:53 -07002904
Mayank Rana51958172017-02-28 14:49:21 -08002905 return NOTIFY_DONE;
2906}
Mayank Rana511f3b22016-08-02 12:00:11 -07002907
Pratham Pratapd76a1782017-11-14 20:50:31 +05302908static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc, int start_idx)
Mayank Rana511f3b22016-08-02 12:00:11 -07002909{
2910 struct device_node *node = mdwc->dev->of_node;
2911 struct extcon_dev *edev;
2912 int ret = 0;
2913
2914 if (!of_property_read_bool(node, "extcon"))
2915 return 0;
2916
Pratham Pratapd76a1782017-11-14 20:50:31 +05302917 /*
2918 * Use mandatory phandle (index 0 for type-C; index 3 for microUSB)
2919 * for USB vbus status notification
2920 */
2921 edev = extcon_get_edev_by_phandle(mdwc->dev, start_idx);
Mayank Rana511f3b22016-08-02 12:00:11 -07002922 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
2923 return PTR_ERR(edev);
2924
2925 if (!IS_ERR(edev)) {
2926 mdwc->extcon_vbus = edev;
2927 mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
2928 ret = extcon_register_notifier(edev, EXTCON_USB,
2929 &mdwc->vbus_nb);
2930 if (ret < 0) {
2931 dev_err(mdwc->dev, "failed to register notifier for USB\n");
2932 return ret;
2933 }
2934 }
2935
Pratham Pratapd76a1782017-11-14 20:50:31 +05302936 /*
2937 * Use optional phandle (index 1 for type-C; index 4 for microUSB)
2938 * for USB ID status notification
2939 */
2940 if (of_count_phandle_with_args(node, "extcon", NULL) > start_idx + 1) {
2941 edev = extcon_get_edev_by_phandle(mdwc->dev, start_idx + 1);
Mayank Rana511f3b22016-08-02 12:00:11 -07002942 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2943 ret = PTR_ERR(edev);
2944 goto err;
2945 }
2946 }
2947
2948 if (!IS_ERR(edev)) {
2949 mdwc->extcon_id = edev;
2950 mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
Mayank Rana54d60432017-07-18 12:10:04 -07002951 mdwc->host_restart_nb.notifier_call =
2952 dwc3_restart_usb_host_mode;
Mayank Rana511f3b22016-08-02 12:00:11 -07002953 ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
2954 &mdwc->id_nb);
2955 if (ret < 0) {
2956 dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
2957 goto err;
2958 }
Mayank Rana54d60432017-07-18 12:10:04 -07002959
2960 ret = extcon_register_blocking_notifier(edev, EXTCON_USB_HOST,
2961 &mdwc->host_restart_nb);
2962 if (ret < 0) {
2963 dev_err(mdwc->dev, "failed to register blocking notifier\n");
2964 goto err1;
2965 }
Mayank Rana511f3b22016-08-02 12:00:11 -07002966 }
2967
Mayank Rana81bd2e52017-07-26 16:15:15 -07002968 edev = NULL;
Pratham Pratapd76a1782017-11-14 20:50:31 +05302969 /* Use optional phandle (index 2) for EUD based detach/attach events */
Mayank Rana51958172017-02-28 14:49:21 -08002970 if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
2971 edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
2972 if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
2973 ret = PTR_ERR(edev);
Pratham Pratapd76a1782017-11-14 20:50:31 +05302974 goto err2;
Mayank Rana51958172017-02-28 14:49:21 -08002975 }
2976 }
2977
Mayank Rana81bd2e52017-07-26 16:15:15 -07002978 if (!IS_ERR_OR_NULL(edev)) {
Mayank Rana51958172017-02-28 14:49:21 -08002979 mdwc->extcon_eud = edev;
2980 mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
2981 ret = extcon_register_notifier(edev, EXTCON_USB,
2982 &mdwc->eud_event_nb);
2983 if (ret < 0) {
2984 dev_err(mdwc->dev, "failed to register notifier for EUD-USB\n");
Mayank Rana54d60432017-07-18 12:10:04 -07002985 goto err2;
Mayank Rana51958172017-02-28 14:49:21 -08002986 }
2987 }
2988
Mayank Rana511f3b22016-08-02 12:00:11 -07002989 return 0;
Mayank Rana54d60432017-07-18 12:10:04 -07002990err2:
2991 if (mdwc->extcon_id)
2992 extcon_unregister_blocking_notifier(mdwc->extcon_id,
2993 EXTCON_USB_HOST, &mdwc->host_restart_nb);
Mayank Rana51958172017-02-28 14:49:21 -08002994err1:
2995 if (mdwc->extcon_id)
2996 extcon_unregister_notifier(mdwc->extcon_id, EXTCON_USB_HOST,
2997 &mdwc->id_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07002998err:
2999 if (mdwc->extcon_vbus)
3000 extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
3001 &mdwc->vbus_nb);
3002 return ret;
3003}
3004
Mayank Rana00d6f722017-09-18 17:22:03 -07003005#define SMMU_BASE 0x60000000 /* Device address range base */
3006#define SMMU_SIZE 0x90000000 /* Device address range size */
Jack Phambbe27962017-03-23 18:42:26 -07003007
3008static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
3009{
3010 struct device_node *node = mdwc->dev->of_node;
Jack Pham283cece2017-04-05 09:58:17 -07003011 int atomic_ctx = 1, s1_bypass;
Jack Phambbe27962017-03-23 18:42:26 -07003012 int ret;
3013
3014 if (!of_property_read_bool(node, "iommus"))
3015 return 0;
3016
3017 mdwc->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
3018 SMMU_BASE, SMMU_SIZE);
3019 if (IS_ERR_OR_NULL(mdwc->iommu_map)) {
3020 ret = PTR_ERR(mdwc->iommu_map) ?: -ENODEV;
3021 dev_err(mdwc->dev, "Failed to create IOMMU mapping (%d)\n",
3022 ret);
3023 return ret;
3024 }
3025 dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
Mayank Rana377ddf42017-09-05 15:09:12 -07003026 ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
3027 DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR, &atomic_ctx);
3028 if (ret) {
3029 dev_err(mdwc->dev, "set UPSTREAM_IOVA_ALLOCATOR failed(%d)\n",
3030 ret);
3031 goto release_mapping;
3032 }
Jack Phambbe27962017-03-23 18:42:26 -07003033
3034 ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
3035 &atomic_ctx);
3036 if (ret) {
3037 dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
3038 ret);
Jack Pham9faa51df2017-04-03 18:13:40 -07003039 goto release_mapping;
Jack Phambbe27962017-03-23 18:42:26 -07003040 }
3041
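	/*
	 * qcom,smmu-s1-bypass presumably requests stage-1 translation bypass,
	 * letting USB DMA addresses pass through the SMMU untranslated.
	 */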
Jack Pham283cece2017-04-05 09:58:17 -07003042 s1_bypass = of_property_read_bool(node, "qcom,smmu-s1-bypass");
3043 ret = iommu_domain_set_attr(mdwc->iommu_map->domain,
3044 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
3045 if (ret) {
3046 dev_err(mdwc->dev, "IOMMU set s1 bypass (%d) failed (%d)\n",
3047 s1_bypass, ret);
3048 goto release_mapping;
3049 }
3050
Jack Pham9faa51df2017-04-03 18:13:40 -07003051 ret = arm_iommu_attach_device(mdwc->dev, mdwc->iommu_map);
3052 if (ret) {
3053 dev_err(mdwc->dev, "IOMMU attach failed (%d)\n", ret);
3054 goto release_mapping;
3055 }
3056 dev_dbg(mdwc->dev, "attached to IOMMU\n");
3057
Jack Phambbe27962017-03-23 18:42:26 -07003058 return 0;
Jack Pham9faa51df2017-04-03 18:13:40 -07003059
3060release_mapping:
3061 arm_iommu_release_mapping(mdwc->iommu_map);
3062 mdwc->iommu_map = NULL;
3063 return ret;
Jack Phambbe27962017-03-23 18:42:26 -07003064}
3065
Mayank Rana511f3b22016-08-02 12:00:11 -07003066static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
3067 char *buf)
3068{
3069 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3070
3071 if (mdwc->vbus_active)
3072 return snprintf(buf, PAGE_SIZE, "peripheral\n");
3073 if (mdwc->id_state == DWC3_ID_GROUND)
3074 return snprintf(buf, PAGE_SIZE, "host\n");
3075
3076 return snprintf(buf, PAGE_SIZE, "none\n");
3077}
3078
3079static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
3080 const char *buf, size_t count)
3081{
3082 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3083
3084 if (sysfs_streq(buf, "peripheral")) {
3085 mdwc->vbus_active = true;
3086 mdwc->id_state = DWC3_ID_FLOAT;
3087 } else if (sysfs_streq(buf, "host")) {
3088 mdwc->vbus_active = false;
3089 mdwc->id_state = DWC3_ID_GROUND;
3090 } else {
3091 mdwc->vbus_active = false;
3092 mdwc->id_state = DWC3_ID_FLOAT;
3093 }
3094
3095 dwc3_ext_event_notify(mdwc);
3096
3097 return count;
3098}
3099
3100static DEVICE_ATTR_RW(mode);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303101static void msm_dwc3_perf_vote_work(struct work_struct *w);
Mayank Rana511f3b22016-08-02 12:00:11 -07003102
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003103static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
3104 char *buf)
3105{
3106 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3107 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3108
3109 return snprintf(buf, PAGE_SIZE, "%s\n",
3110 usb_speed_string(dwc->max_hw_supp_speed));
3111}
3112
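/*
 * Writing "high" or "super" to the "speed" attribute overrides the maximum
 * supported speed and schedules restart_usb_work so the change takes effect.
 */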
3113static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
3114 const char *buf, size_t count)
3115{
3116 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
3117 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3118 enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
3119
3120 if (sysfs_streq(buf, "high"))
3121 req_speed = USB_SPEED_HIGH;
3122 else if (sysfs_streq(buf, "super"))
3123 req_speed = USB_SPEED_SUPER;
3124
3125 if (req_speed != USB_SPEED_UNKNOWN &&
3126 req_speed != dwc->max_hw_supp_speed) {
3127 dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
3128 schedule_work(&mdwc->restart_usb_work);
3129 }
3130
3131 return count;
3132}
3133static DEVICE_ATTR_RW(speed);
3134
Mayank Rana511f3b22016-08-02 12:00:11 -07003135static int dwc3_msm_probe(struct platform_device *pdev)
3136{
3137 struct device_node *node = pdev->dev.of_node, *dwc3_node;
3138 struct device *dev = &pdev->dev;
Hemant Kumar8220a982017-01-19 18:11:34 -08003139 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003140 struct dwc3_msm *mdwc;
3141 struct dwc3 *dwc;
3142 struct resource *res;
3143 void __iomem *tcsr;
3144 bool host_mode;
Mayank Ranad339abe2017-05-31 09:19:49 -07003145 int ret = 0, i;
Mayank Rana511f3b22016-08-02 12:00:11 -07003146 int ext_hub_reset_gpio;
3147 u32 val;
Mayank Ranad339abe2017-05-31 09:19:49 -07003148 unsigned long irq_type;
Mayank Rana511f3b22016-08-02 12:00:11 -07003149
3150 mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
3151 if (!mdwc)
3152 return -ENOMEM;
3153
Mayank Rana511f3b22016-08-02 12:00:11 -07003154 platform_set_drvdata(pdev, mdwc);
3155 mdwc->dev = &pdev->dev;
3156
3157 INIT_LIST_HEAD(&mdwc->req_complete_list);
3158 INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
3159 INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
Jack Pham4b8b4ae2016-08-09 11:36:34 -07003160 INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003161 INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303162 INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
Hemant Kumar006fae42017-07-12 18:11:25 -07003163 INIT_DELAYED_WORK(&mdwc->sdp_check, check_for_sdp_connection);
Mayank Rana511f3b22016-08-02 12:00:11 -07003164
3165 mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
3166 if (!mdwc->dwc3_wq) {
3167 pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
3168 return -ENOMEM;
3169 }
3170
3171 /* Get all clks and gdsc reference */
3172 ret = dwc3_msm_get_clk_gdsc(mdwc);
3173 if (ret) {
3174 dev_err(&pdev->dev, "error getting clock or gdsc.\n");
Ziqi Chen0ea81162017-08-04 18:17:55 +08003175 goto err;
Mayank Rana511f3b22016-08-02 12:00:11 -07003176 }
3177
3178 mdwc->id_state = DWC3_ID_FLOAT;
3179 set_bit(ID, &mdwc->inputs);
3180
3181 mdwc->charging_disabled = of_property_read_bool(node,
3182 "qcom,charging-disabled");
3183
3184 ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
3185 &mdwc->lpm_to_suspend_delay);
3186 if (ret) {
3187 dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
3188 mdwc->lpm_to_suspend_delay = 0;
3189 }
3190
Mayank Ranad339abe2017-05-31 09:19:49 -07003191 memcpy(mdwc->wakeup_irq, usb_irq_info, sizeof(usb_irq_info));
3192 for (i = 0; i < USB_MAX_IRQ; i++) {
3193 irq_type = IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME |
3194 IRQF_ONESHOT;
3195 mdwc->wakeup_irq[i].irq = platform_get_irq_byname(pdev,
3196 mdwc->wakeup_irq[i].name);
3197 if (mdwc->wakeup_irq[i].irq < 0) {
3198			/* pwr_event_irq is the only mandatory irq */
3199 if (!strcmp(mdwc->wakeup_irq[i].name,
3200 "pwr_event_irq")) {
3201				dev_err(&pdev->dev, "get_irq for %s failed\n",
3202 mdwc->wakeup_irq[i].name);
3203 ret = -EINVAL;
3204 goto err;
3205 }
3206 mdwc->wakeup_irq[i].irq = 0;
3207 } else {
3208 irq_set_status_flags(mdwc->wakeup_irq[i].irq,
3209 IRQ_NOAUTOEN);
3210			/* ss_phy_irq is a level-triggered interrupt */
3211 if (!strcmp(mdwc->wakeup_irq[i].name, "ss_phy_irq"))
3212 irq_type = IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
3213 IRQ_TYPE_LEVEL_HIGH | IRQF_EARLY_RESUME;
Mayank Rana511f3b22016-08-02 12:00:11 -07003214
Mayank Ranad339abe2017-05-31 09:19:49 -07003215 ret = devm_request_threaded_irq(&pdev->dev,
3216 mdwc->wakeup_irq[i].irq,
Mayank Rana511f3b22016-08-02 12:00:11 -07003217 msm_dwc3_pwr_irq,
3218 msm_dwc3_pwr_irq_thread,
Mayank Ranad339abe2017-05-31 09:19:49 -07003219 irq_type,
3220 mdwc->wakeup_irq[i].name, mdwc);
3221 if (ret) {
3222				dev_err(&pdev->dev, "irq req %s failed: %d\n",
3223 mdwc->wakeup_irq[i].name, ret);
3224 goto err;
3225 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003226 }
3227 }
3228
3229 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
3230 if (!res) {
3231 dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
3232 } else {
3233 tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
3234 resource_size(res));
3235 if (IS_ERR_OR_NULL(tcsr)) {
3236 dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
3237 } else {
3238 /* Enable USB3 on the primary USB port. */
3239 writel_relaxed(0x1, tcsr);
3240 /*
3241 * Ensure that TCSR write is completed before
3242 * USB registers initialization.
3243 */
3244 mb();
3245 }
3246 }
3247
3248 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
3249 if (!res) {
3250 dev_err(&pdev->dev, "missing memory base resource\n");
3251 ret = -ENODEV;
3252 goto err;
3253 }
3254
3255 mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
3256 resource_size(res));
3257 if (!mdwc->base) {
3258 dev_err(&pdev->dev, "ioremap failed\n");
3259 ret = -ENODEV;
3260 goto err;
3261 }
3262
3263 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3264 "ahb2phy_base");
3265 if (res) {
3266 mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
3267 res->start, resource_size(res));
3268 if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
3269			dev_err(dev, "failed to map ahb2phy_base\n");
3270 mdwc->ahb2phy_base = NULL;
3271 } else {
3272 /*
3273 * On some targets cfg_ahb_clk depends upon usb gdsc
3274			 * turning on the usb gdsc regulator, the clk is stuck off.
3275 * turning on usb gdsc regulator clk is stuck off.
3276 */
3277 dwc3_msm_config_gdsc(mdwc, 1);
3278 clk_prepare_enable(mdwc->cfg_ahb_clk);
3279			/* Configure AHB2PHY for one wait state read/write */
3280 val = readl_relaxed(mdwc->ahb2phy_base +
3281 PERIPH_SS_AHB2PHY_TOP_CFG);
3282 if (val != ONE_READ_WRITE_WAIT) {
3283 writel_relaxed(ONE_READ_WRITE_WAIT,
3284 mdwc->ahb2phy_base +
3285 PERIPH_SS_AHB2PHY_TOP_CFG);
3286 /* complete above write before using USB PHY */
3287 mb();
3288 }
3289 clk_disable_unprepare(mdwc->cfg_ahb_clk);
3290 dwc3_msm_config_gdsc(mdwc, 0);
3291 }
3292 }
3293
3294 if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
3295 mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
3296 if (IS_ERR(mdwc->dbm)) {
3297 dev_err(&pdev->dev, "unable to get dbm device\n");
3298 ret = -EPROBE_DEFER;
3299 goto err;
3300 }
3301 /*
3302		 * Require the pwr_event_irq if the dbm indicates exit
3303		 * from L1 by interrupt
3304 */
3305 if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
Mayank Ranad339abe2017-05-31 09:19:49 -07003306 if (!mdwc->wakeup_irq[PWR_EVNT_IRQ].irq) {
Mayank Rana511f3b22016-08-02 12:00:11 -07003307 dev_err(&pdev->dev,
3308 "need pwr_event_irq exiting L1\n");
3309 ret = -EINVAL;
3310 goto err;
3311 }
3312 }
3313 }
3314
3315 ext_hub_reset_gpio = of_get_named_gpio(node,
3316 "qcom,ext-hub-reset-gpio", 0);
3317
3318 if (gpio_is_valid(ext_hub_reset_gpio)
3319 && (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
3320 "qcom,ext-hub-reset-gpio"))) {
3321 /* reset external hub */
3322 gpio_direction_output(ext_hub_reset_gpio, 1);
3323 /*
3324		 * Hub reset should be asserted for a minimum of 5 microseconds
3325 * before deasserting.
3326 */
3327 usleep_range(5, 1000);
3328 gpio_direction_output(ext_hub_reset_gpio, 0);
3329 }
3330
3331 if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
3332 &mdwc->tx_fifo_size))
3333 dev_err(&pdev->dev,
3334			"unable to read tx fifo size from platform data\n");
3335
3336 mdwc->disable_host_mode_pm = of_property_read_bool(node,
3337 "qcom,disable-host-mode-pm");
Mayank Ranad339abe2017-05-31 09:19:49 -07003338 mdwc->use_pdc_interrupts = of_property_read_bool(node,
3339 "qcom,use-pdc-interrupts");
Mayank Rana511f3b22016-08-02 12:00:11 -07003340 dwc3_set_notifier(&dwc3_msm_notify_event);
3341
Jack Phambbe27962017-03-23 18:42:26 -07003342 ret = dwc3_msm_init_iommu(mdwc);
3343 if (ret)
3344 goto err;
3345
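	/* Prefer 64-bit DMA addressing; fall back to a 32-bit mask if that fails */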
Mayank Rana42dfac42017-10-03 15:01:03 -07003346 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
3347 dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
3348 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
3349 dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
3350 ret = -EOPNOTSUPP;
3351 goto uninit_iommu;
3352 }
3353 }
3354
Mayank Rana511f3b22016-08-02 12:00:11 -07003355 /* Assumes dwc3 is the first DT child of dwc3-msm */
3356 dwc3_node = of_get_next_available_child(node, NULL);
3357 if (!dwc3_node) {
3358 dev_err(&pdev->dev, "failed to find dwc3 child\n");
3359 ret = -ENODEV;
Jack Phambbe27962017-03-23 18:42:26 -07003360 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003361 }
3362
3363 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3364 if (ret) {
3365 dev_err(&pdev->dev,
3366				"failed to create dwc3 core\n");
3367 of_node_put(dwc3_node);
Jack Phambbe27962017-03-23 18:42:26 -07003368 goto uninit_iommu;
Mayank Rana511f3b22016-08-02 12:00:11 -07003369 }
3370
3371 mdwc->dwc3 = of_find_device_by_node(dwc3_node);
3372 of_node_put(dwc3_node);
3373 if (!mdwc->dwc3) {
3374 dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
3375 goto put_dwc3;
3376 }
3377
3378 mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3379 "usb-phy", 0);
3380 if (IS_ERR(mdwc->hs_phy)) {
3381 dev_err(&pdev->dev, "unable to get hsphy device\n");
3382 ret = PTR_ERR(mdwc->hs_phy);
3383 goto put_dwc3;
3384 }
3385 mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
3386 "usb-phy", 1);
3387 if (IS_ERR(mdwc->ss_phy)) {
3388 dev_err(&pdev->dev, "unable to get ssphy device\n");
3389 ret = PTR_ERR(mdwc->ss_phy);
3390 goto put_dwc3;
3391 }
3392
3393 mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3394 if (mdwc->bus_scale_table) {
3395 mdwc->bus_perf_client =
3396 msm_bus_scale_register_client(mdwc->bus_scale_table);
3397 }
3398
3399 dwc = platform_get_drvdata(mdwc->dwc3);
3400 if (!dwc) {
3401 dev_err(&pdev->dev, "Failed to get dwc3 device\n");
3402 goto put_dwc3;
3403 }
3404
3405 mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
3406 mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
3407
3408 if (cpu_to_affin)
3409 register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3410
Mayank Ranaf4918d32016-12-15 13:35:55 -08003411 ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
3412 &mdwc->num_gsi_event_buffers);
3413
Jack Pham9faa51df2017-04-03 18:13:40 -07003414 /* IOMMU will be reattached upon each resume/connect */
3415 if (mdwc->iommu_map)
3416 arm_iommu_detach_device(mdwc->dev);
3417
Mayank Rana511f3b22016-08-02 12:00:11 -07003418 /*
3419 * Clocks and regulators will not be turned on until the first time
3420 * runtime PM resume is called. This is to allow for booting up with
3421 * charger already connected so as not to disturb PHY line states.
3422 */
3423 mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
3424 atomic_set(&dwc->in_lpm, 1);
Mayank Rana511f3b22016-08-02 12:00:11 -07003425 pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
3426 pm_runtime_use_autosuspend(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003427 device_init_wakeup(mdwc->dev, 1);
3428
3429 if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
3430 pm_runtime_get_noresume(mdwc->dev);
3431
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303432 ret = of_property_read_u32(node, "qcom,pm-qos-latency",
3433 &mdwc->pm_qos_latency);
3434 if (ret) {
3435 dev_dbg(&pdev->dev, "setting pm-qos-latency to zero.\n");
3436 mdwc->pm_qos_latency = 0;
3437 }
3438
Pratham Pratapd76a1782017-11-14 20:50:31 +05303439 mdwc->no_vbus_vote_type_c = of_property_read_bool(node,
3440 "qcom,no-vbus-vote-with-type-C");
3441
3442 /* Mark type-C as true by default */
3443 mdwc->type_c = true;
3444
Hemant Kumar8220a982017-01-19 18:11:34 -08003445 mdwc->usb_psy = power_supply_get_by_name("usb");
3446 if (!mdwc->usb_psy) {
3447 dev_warn(mdwc->dev, "Could not get usb power_supply\n");
3448 pval.intval = -EINVAL;
3449 } else {
3450 power_supply_get_property(mdwc->usb_psy,
Pratham Pratapd76a1782017-11-14 20:50:31 +05303451 POWER_SUPPLY_PROP_CONNECTOR_TYPE, &pval);
3452 if (pval.intval == POWER_SUPPLY_CONNECTOR_MICRO_USB)
3453 mdwc->type_c = false;
3454 power_supply_get_property(mdwc->usb_psy,
Hemant Kumar8220a982017-01-19 18:11:34 -08003455 POWER_SUPPLY_PROP_PRESENT, &pval);
3456 }
3457
Pratham Pratapd76a1782017-11-14 20:50:31 +05303458 /*
3459 * Extcon phandles starting indices in DT:
3460 * type-C : 0
3461 * microUSB : 3
3462 */
3463 ret = dwc3_msm_extcon_register(mdwc, mdwc->type_c ? 0 : 3);
3464 if (ret)
3465 goto put_psy;
3466
Vijayavardhan Vennapusad8a071c2017-09-08 12:51:25 +05303467 mutex_init(&mdwc->suspend_resume_mutex);
Mayank Rana511f3b22016-08-02 12:00:11 -07003468 /* Update initial VBUS/ID state from extcon */
Jack Pham4e9dff72017-04-04 18:05:53 -07003469 if (mdwc->extcon_vbus && extcon_get_state(mdwc->extcon_vbus,
Mayank Rana511f3b22016-08-02 12:00:11 -07003470 EXTCON_USB))
3471 dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
Jack Pham4e9dff72017-04-04 18:05:53 -07003472 else if (mdwc->extcon_id && extcon_get_state(mdwc->extcon_id,
Mayank Rana511f3b22016-08-02 12:00:11 -07003473 EXTCON_USB_HOST))
3474 dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
Hemant Kumar8220a982017-01-19 18:11:34 -08003475 else if (!pval.intval) {
3476 /* USB cable is not connected */
3477 schedule_delayed_work(&mdwc->sm_work, 0);
3478 } else {
3479 if (pval.intval > 0)
3480 dev_info(mdwc->dev, "charger detection in progress\n");
3481 }
Mayank Rana511f3b22016-08-02 12:00:11 -07003482
3483 device_create_file(&pdev->dev, &dev_attr_mode);
Vamsi Krishna Samavedam17f26db2017-01-31 17:21:23 -08003484 device_create_file(&pdev->dev, &dev_attr_speed);
Mayank Rana511f3b22016-08-02 12:00:11 -07003485
Mayank Rana511f3b22016-08-02 12:00:11 -07003486 host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
3487 if (!dwc->is_drd && host_mode) {
3488 dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
3489 mdwc->id_state = DWC3_ID_GROUND;
3490 dwc3_ext_event_notify(mdwc);
3491 }
3492
3493 return 0;
3494
Pratham Pratapd76a1782017-11-14 20:50:31 +05303495put_psy:
3496 if (mdwc->usb_psy)
3497 power_supply_put(mdwc->usb_psy);
3498
3499 if (cpu_to_affin)
3500 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
Mayank Rana511f3b22016-08-02 12:00:11 -07003501put_dwc3:
Mayank Rana511f3b22016-08-02 12:00:11 -07003502 if (mdwc->bus_perf_client)
3503 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
Ziqi Chen0ea81162017-08-04 18:17:55 +08003504
Jack Phambbe27962017-03-23 18:42:26 -07003505uninit_iommu:
Jack Pham9faa51df2017-04-03 18:13:40 -07003506 if (mdwc->iommu_map) {
3507 arm_iommu_detach_device(mdwc->dev);
Jack Phambbe27962017-03-23 18:42:26 -07003508 arm_iommu_release_mapping(mdwc->iommu_map);
Jack Pham9faa51df2017-04-03 18:13:40 -07003509 }
Ziqi Chen0ea81162017-08-04 18:17:55 +08003510 of_platform_depopulate(&pdev->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003511err:
Ziqi Chen0ea81162017-08-04 18:17:55 +08003512 destroy_workqueue(mdwc->dwc3_wq);
Mayank Rana511f3b22016-08-02 12:00:11 -07003513 return ret;
3514}
3515
Mayank Rana511f3b22016-08-02 12:00:11 -07003516static int dwc3_msm_remove(struct platform_device *pdev)
3517{
3518 struct dwc3_msm *mdwc = platform_get_drvdata(pdev);
Mayank Rana08e41922017-03-02 15:25:48 -08003519 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07003520 int ret_pm;
3521
3522 device_remove_file(&pdev->dev, &dev_attr_mode);
Pratham Pratapd76a1782017-11-14 20:50:31 +05303523 if (mdwc->usb_psy)
3524 power_supply_put(mdwc->usb_psy);
Mayank Rana511f3b22016-08-02 12:00:11 -07003525
3526 if (cpu_to_affin)
3527 unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
3528
3529 /*
3530 * In case of system suspend, pm_runtime_get_sync fails.
3531 * Hence turn ON the clocks manually.
3532 */
3533 ret_pm = pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003534 dbg_event(0xFF, "Remov gsyn", ret_pm);
Mayank Rana511f3b22016-08-02 12:00:11 -07003535 if (ret_pm < 0) {
3536 dev_err(mdwc->dev,
3537 "pm_runtime_get_sync failed with %d\n", ret_pm);
Vijayavardhan Vennapusa934d9cd2016-11-30 13:10:01 +05303538 if (mdwc->noc_aggr_clk)
3539 clk_prepare_enable(mdwc->noc_aggr_clk);
Mayank Rana511f3b22016-08-02 12:00:11 -07003540 clk_prepare_enable(mdwc->utmi_clk);
3541 clk_prepare_enable(mdwc->core_clk);
3542 clk_prepare_enable(mdwc->iface_clk);
3543 clk_prepare_enable(mdwc->sleep_clk);
3544 if (mdwc->bus_aggr_clk)
3545 clk_prepare_enable(mdwc->bus_aggr_clk);
3546 clk_prepare_enable(mdwc->xo_clk);
3547 }
3548
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303549 cancel_delayed_work_sync(&mdwc->perf_vote_work);
Mayank Rana511f3b22016-08-02 12:00:11 -07003550 cancel_delayed_work_sync(&mdwc->sm_work);
3551
3552 if (mdwc->hs_phy)
3553 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
Ziqi Chen0ea81162017-08-04 18:17:55 +08003554 of_platform_depopulate(&pdev->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07003555
Mayank Rana08e41922017-03-02 15:25:48 -08003556 dbg_event(0xFF, "Remov put", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07003557 pm_runtime_disable(mdwc->dev);
3558 pm_runtime_barrier(mdwc->dev);
3559 pm_runtime_put_sync(mdwc->dev);
3560 pm_runtime_set_suspended(mdwc->dev);
3561 device_wakeup_disable(mdwc->dev);
3562
3563 if (mdwc->bus_perf_client)
3564 msm_bus_scale_unregister_client(mdwc->bus_perf_client);
3565
3566 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
3567 regulator_disable(mdwc->vbus_reg);
3568
Mayank Ranad339abe2017-05-31 09:19:49 -07003569 if (mdwc->wakeup_irq[HS_PHY_IRQ].irq)
3570 disable_irq(mdwc->wakeup_irq[HS_PHY_IRQ].irq);
3571 if (mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq)
3572 disable_irq(mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq);
3573 if (mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq)
3574 disable_irq(mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq);
3575 if (mdwc->wakeup_irq[SS_PHY_IRQ].irq)
3576 disable_irq(mdwc->wakeup_irq[SS_PHY_IRQ].irq);
3577 disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
Mayank Rana511f3b22016-08-02 12:00:11 -07003578
3579 clk_disable_unprepare(mdwc->utmi_clk);
3580 clk_set_rate(mdwc->core_clk, 19200000);
3581 clk_disable_unprepare(mdwc->core_clk);
3582 clk_disable_unprepare(mdwc->iface_clk);
3583 clk_disable_unprepare(mdwc->sleep_clk);
3584 clk_disable_unprepare(mdwc->xo_clk);
3585 clk_put(mdwc->xo_clk);
3586
3587 dwc3_msm_config_gdsc(mdwc, 0);
3588
Jack Phambbe27962017-03-23 18:42:26 -07003589 if (mdwc->iommu_map) {
3590 if (!atomic_read(&dwc->in_lpm))
3591 arm_iommu_detach_device(mdwc->dev);
3592 arm_iommu_release_mapping(mdwc->iommu_map);
3593 }
3594
Mayank Rana511f3b22016-08-02 12:00:11 -07003595 return 0;
3596}
3597
Jack Pham4d4e9342016-12-07 19:25:02 -08003598static int dwc3_msm_host_notifier(struct notifier_block *nb,
3599 unsigned long event, void *ptr)
3600{
3601 struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, host_nb);
3602 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3603 struct usb_device *udev = ptr;
3604 union power_supply_propval pval;
3605 unsigned int max_power;
3606
3607 if (event != USB_DEVICE_ADD && event != USB_DEVICE_REMOVE)
3608 return NOTIFY_DONE;
3609
3610 if (!mdwc->usb_psy) {
3611 mdwc->usb_psy = power_supply_get_by_name("usb");
3612 if (!mdwc->usb_psy)
3613 return NOTIFY_DONE;
3614 }
3615
3616 /*
3617 * For direct-attach devices, new udev is direct child of root hub
3618 * i.e. dwc -> xhci -> root_hub -> udev
3619 * root_hub's udev->parent==NULL, so traverse struct device hierarchy
3620 */
3621 if (udev->parent && !udev->parent->parent &&
3622 udev->dev.parent->parent == &dwc->xhci->dev) {
3623 if (event == USB_DEVICE_ADD && udev->actconfig) {
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003624 if (!dwc3_msm_is_ss_rhport_connected(mdwc)) {
3625 /*
3626 * Core clock rate can be reduced only if root
3627 * hub SS port is not enabled/connected.
3628 */
3629 clk_set_rate(mdwc->core_clk,
3630 mdwc->core_clk_rate_hs);
3631 dev_dbg(mdwc->dev,
3632 "set hs core clk rate %ld\n",
3633 mdwc->core_clk_rate_hs);
3634 mdwc->max_rh_port_speed = USB_SPEED_HIGH;
3635 } else {
3636 mdwc->max_rh_port_speed = USB_SPEED_SUPER;
3637 }
3638
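			/* bMaxPower is in 8 mA units for SuperSpeed and faster, 2 mA units otherwise */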
Jack Pham4d4e9342016-12-07 19:25:02 -08003639 if (udev->speed >= USB_SPEED_SUPER)
3640 max_power = udev->actconfig->desc.bMaxPower * 8;
3641 else
3642 max_power = udev->actconfig->desc.bMaxPower * 2;
3643 dev_dbg(mdwc->dev, "%s configured bMaxPower:%d (mA)\n",
3644 dev_name(&udev->dev), max_power);
3645
3646 /* inform PMIC of max power so it can optimize boost */
3647 pval.intval = max_power * 1000;
3648 power_supply_set_property(mdwc->usb_psy,
3649 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
3650 } else {
3651 pval.intval = 0;
3652 power_supply_set_property(mdwc->usb_psy,
3653 POWER_SUPPLY_PROP_BOOST_CURRENT, &pval);
Hemant Kumar6f504dc2017-02-07 14:13:58 -08003654
3655 /* set rate back to default core clk rate */
3656 clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
3657 dev_dbg(mdwc->dev, "set core clk rate %ld\n",
3658 mdwc->core_clk_rate);
Hemant Kumar8e4c2f22017-01-24 18:13:07 -08003659 mdwc->max_rh_port_speed = USB_SPEED_UNKNOWN;
Jack Pham4d4e9342016-12-07 19:25:02 -08003660 }
3661 }
3662
3663 return NOTIFY_DONE;
3664}
3665
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303666static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc, bool perf_mode)
3667{
3668 static bool curr_perf_mode;
3669 int latency = mdwc->pm_qos_latency;
3670
3671 if ((curr_perf_mode == perf_mode) || !latency)
3672 return;
3673
3674 if (perf_mode)
3675 pm_qos_update_request(&mdwc->pm_qos_req_dma, latency);
3676 else
3677 pm_qos_update_request(&mdwc->pm_qos_req_dma,
3678 PM_QOS_DEFAULT_VALUE);
3679
3680 curr_perf_mode = perf_mode;
3681 pr_debug("%s: latency updated to: %d\n", __func__,
3682 perf_mode ? latency : PM_QOS_DEFAULT_VALUE);
3683}
3684
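/*
 * Sampled every PM_QOS_SAMPLE_SEC seconds: if at least PM_QOS_THRESHOLD
 * controller interrupts arrived in the last window, vote for the low-latency
 * PM QoS value, otherwise drop back to PM_QOS_DEFAULT_VALUE.
 */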
3685static void msm_dwc3_perf_vote_work(struct work_struct *w)
3686{
3687 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
3688 perf_vote_work.work);
3689 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3690 static unsigned long last_irq_cnt;
3691 bool in_perf_mode = false;
3692
3693 if (dwc->irq_cnt - last_irq_cnt >= PM_QOS_THRESHOLD)
3694 in_perf_mode = true;
3695
3696 pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%lu\n",
3697 __func__, in_perf_mode, (dwc->irq_cnt - last_irq_cnt));
3698
3699 last_irq_cnt = dwc->irq_cnt;
3700 msm_dwc3_perf_vote_update(mdwc, in_perf_mode);
3701 schedule_delayed_work(&mdwc->perf_vote_work,
3702 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
3703}
3704
Mayank Rana511f3b22016-08-02 12:00:11 -07003705#define VBUS_REG_CHECK_DELAY (msecs_to_jiffies(1000))
3706
3707/**
3708 * dwc3_otg_start_host - helper function for starting/stopping the host
3709 * controller driver.
3710 *
3711 * @mdwc: Pointer to the dwc3_msm structure.
3712 * @on: start / stop the host controller driver.
3713 *
3714 * Returns 0 on success otherwise negative errno.
3715 */
3716static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
3717{
3718 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3719 int ret = 0;
3720
Mayank Rana511f3b22016-08-02 12:00:11 -07003721 /*
3722 * The vbus_reg pointer could have multiple values
3723 * NULL: regulator_get() hasn't been called, or was previously deferred
3724 * IS_ERR: regulator could not be obtained, so skip using it
3725 * Valid pointer otherwise
3726 */
Pratham Pratapd76a1782017-11-14 20:50:31 +05303727 if (!mdwc->vbus_reg && (!mdwc->type_c ||
3728 (mdwc->type_c && !mdwc->no_vbus_vote_type_c))) {
Mayank Rana511f3b22016-08-02 12:00:11 -07003729 mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
3730 "vbus_dwc3");
3731 if (IS_ERR(mdwc->vbus_reg) &&
3732 PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
3733 /* regulators may not be ready, so retry again later */
3734 mdwc->vbus_reg = NULL;
3735 return -EPROBE_DEFER;
3736 }
3737 }
3738
3739 if (on) {
3740 dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
3741
Mayank Rana511f3b22016-08-02 12:00:11 -07003742 mdwc->hs_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003743 if (dwc->maximum_speed == USB_SPEED_SUPER) {
Hemant Kumarde1df692016-04-26 19:36:48 -07003744 mdwc->ss_phy->flags |= PHY_HOST_MODE;
Mayank Rana0d5efd72017-06-08 10:06:00 -07003745 usb_phy_notify_connect(mdwc->ss_phy,
3746 USB_SPEED_SUPER);
3747 }
Hemant Kumarde1df692016-04-26 19:36:48 -07003748
Mayank Rana0d5efd72017-06-08 10:06:00 -07003749 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
Hemant Kumarde1df692016-04-26 19:36:48 -07003750 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003751 dbg_event(0xFF, "StrtHost gync",
3752 atomic_read(&mdwc->dev->power.usage_count));
Pratham Pratapd76a1782017-11-14 20:50:31 +05303753 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
Mayank Rana511f3b22016-08-02 12:00:11 -07003754 ret = regulator_enable(mdwc->vbus_reg);
3755 if (ret) {
3756 dev_err(mdwc->dev, "unable to enable vbus_reg\n");
3757 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3758 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3759 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003760 dbg_event(0xFF, "vregerr psync",
3761 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003762 return ret;
3763 }
3764
3765 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
3766
Jack Pham4d4e9342016-12-07 19:25:02 -08003767 mdwc->host_nb.notifier_call = dwc3_msm_host_notifier;
3768 usb_register_notify(&mdwc->host_nb);
3769
Manu Gautam976fdfc2016-08-18 09:27:35 +05303770 mdwc->usbdev_nb.notifier_call = msm_dwc3_usbdev_notify;
3771 usb_register_atomic_notify(&mdwc->usbdev_nb);
Mayank Ranaa75caa52017-10-10 11:45:13 -07003772 ret = dwc3_host_init(dwc);
Mayank Rana511f3b22016-08-02 12:00:11 -07003773 if (ret) {
3774 dev_err(mdwc->dev,
3775 "%s: failed to add XHCI pdev ret=%d\n",
3776 __func__, ret);
Pratham Pratapd76a1782017-11-14 20:50:31 +05303777 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
Mayank Rana511f3b22016-08-02 12:00:11 -07003778 regulator_disable(mdwc->vbus_reg);
3779 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
3780 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3781 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003782 dbg_event(0xFF, "pdeverr psync",
3783 atomic_read(&mdwc->dev->power.usage_count));
Jack Pham4d4e9342016-12-07 19:25:02 -08003784 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003785 return ret;
3786 }
3787
3788 /*
3789		 * In some cases the USB PHY is observed not to enter suspend
3790		 * with host mode suspend functionality. Hence disable
3791 * XHCI's runtime PM here if disable_host_mode_pm is set.
3792 */
3793 if (mdwc->disable_host_mode_pm)
3794 pm_runtime_disable(&dwc->xhci->dev);
3795
3796 mdwc->in_host_mode = true;
3797 dwc3_usb3_phy_suspend(dwc, true);
3798
3799 /* xHCI should have incremented child count as necessary */
Mayank Rana08e41922017-03-02 15:25:48 -08003800 dbg_event(0xFF, "StrtHost psync",
3801 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003802 pm_runtime_mark_last_busy(mdwc->dev);
3803 pm_runtime_put_sync_autosuspend(mdwc->dev);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303804#ifdef CONFIG_SMP
3805 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3806 mdwc->pm_qos_req_dma.irq = dwc->irq;
3807#endif
3808 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3809 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3810 /* start in perf mode for better performance initially */
3811 msm_dwc3_perf_vote_update(mdwc, true);
3812 schedule_delayed_work(&mdwc->perf_vote_work,
3813 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003814 } else {
3815 dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
3816
Manu Gautam976fdfc2016-08-18 09:27:35 +05303817 usb_unregister_atomic_notify(&mdwc->usbdev_nb);
Pratham Pratapd76a1782017-11-14 20:50:31 +05303818 if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
Mayank Rana511f3b22016-08-02 12:00:11 -07003819 ret = regulator_disable(mdwc->vbus_reg);
3820 if (ret) {
3821 dev_err(mdwc->dev, "unable to disable vbus_reg\n");
3822 return ret;
3823 }
3824
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303825 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3826 msm_dwc3_perf_vote_update(mdwc, false);
3827 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3828
Mayank Rana511f3b22016-08-02 12:00:11 -07003829 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003830 dbg_event(0xFF, "StopHost gsync",
3831 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003832 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
Mayank Rana0d5efd72017-06-08 10:06:00 -07003833 if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
3834 usb_phy_notify_disconnect(mdwc->ss_phy,
3835 USB_SPEED_SUPER);
3836 mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
3837 }
3838
Mayank Rana511f3b22016-08-02 12:00:11 -07003839 mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
Mayank Ranaa75caa52017-10-10 11:45:13 -07003840 dwc3_host_exit(dwc);
Jack Pham4d4e9342016-12-07 19:25:02 -08003841 usb_unregister_notify(&mdwc->host_nb);
Mayank Rana511f3b22016-08-02 12:00:11 -07003842
Mayank Rana511f3b22016-08-02 12:00:11 -07003843 dwc3_usb3_phy_suspend(dwc, false);
Mayank Rana511f3b22016-08-02 12:00:11 -07003844 mdwc->in_host_mode = false;
3845
Mayank Ranaa1d094c2017-11-03 10:40:10 -07003846 pm_runtime_put_sync_suspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003847 dbg_event(0xFF, "StopHost psync",
3848 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003849 }
3850
3851 return 0;
3852}
3853
3854static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
3855{
3856 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3857
3858 /* Update OTG VBUS Valid from HSPHY to controller */
3859 dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
3860 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
3861 UTMI_OTG_VBUS_VALID,
3862 vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
3863
3864 /* Update only if Super Speed is supported */
3865 if (dwc->maximum_speed == USB_SPEED_SUPER) {
3866 /* Update VBUS Valid from SSPHY to controller */
3867 dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
3868 LANE0_PWR_PRESENT,
3869 vbus_present ? LANE0_PWR_PRESENT : 0);
3870 }
3871}
3872
3873/**
3874 * dwc3_otg_start_peripheral - bind/unbind the peripheral controller.
3875 *
3876 * @mdwc: Pointer to the dwc3_msm structure.
3877 * @on: Turn ON/OFF the gadget.
3878 *
3879 * Returns 0 on success otherwise negative errno.
3880 */
3881static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
3882{
3883 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
3884
3885 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003886 dbg_event(0xFF, "StrtGdgt gsync",
3887 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003888
3889 if (on) {
3890 dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
3891 __func__, dwc->gadget.name);
3892
3893 dwc3_override_vbus_status(mdwc, true);
3894 usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
3895 usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
3896
3897 /*
3898 * Core reset is not required during start peripheral. Only
3899 * DBM reset is required, hence perform only DBM reset here.
3900 */
3901 dwc3_msm_block_reset(mdwc, false);
3902
3903 dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
Mayank Rana7e781e72017-12-13 17:27:23 -08003904 mdwc->in_device_mode = true;
Mayank Rana511f3b22016-08-02 12:00:11 -07003905 usb_gadget_vbus_connect(&dwc->gadget);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303906#ifdef CONFIG_SMP
3907 mdwc->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
3908 mdwc->pm_qos_req_dma.irq = dwc->irq;
3909#endif
3910 pm_qos_add_request(&mdwc->pm_qos_req_dma,
3911 PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3912 /* start in perf mode for better performance initially */
3913 msm_dwc3_perf_vote_update(mdwc, true);
3914 schedule_delayed_work(&mdwc->perf_vote_work,
3915 msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC));
Mayank Rana511f3b22016-08-02 12:00:11 -07003916 } else {
3917 dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
3918 __func__, dwc->gadget.name);
Vijayavardhan Vennapusae6d3f802016-12-15 13:48:39 +05303919 cancel_delayed_work_sync(&mdwc->perf_vote_work);
3920 msm_dwc3_perf_vote_update(mdwc, false);
3921 pm_qos_remove_request(&mdwc->pm_qos_req_dma);
3922
Mayank Rana7e781e72017-12-13 17:27:23 -08003923 mdwc->in_device_mode = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07003924 usb_gadget_vbus_disconnect(&dwc->gadget);
3925 usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
3926 usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
3927 dwc3_override_vbus_status(mdwc, false);
3928 dwc3_usb3_phy_suspend(dwc, false);
3929 }
3930
3931 pm_runtime_put_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08003932 dbg_event(0xFF, "StopGdgt psync",
3933 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07003934
3935 return 0;
3936}
3937
Mayank Rana54d60432017-07-18 12:10:04 -07003938/* speed: 0 - USB_SPEED_HIGH, 1 - USB_SPEED_SUPER */
3939static int dwc3_restart_usb_host_mode(struct notifier_block *nb,
3940 unsigned long event, void *ptr)
3941{
3942 struct dwc3_msm *mdwc;
3943 struct dwc3 *dwc;
3944 int ret = -EINVAL, usb_speed;
3945
3946 mdwc = container_of(nb, struct dwc3_msm, host_restart_nb);
3947 dwc = platform_get_drvdata(mdwc->dwc3);
3948
3949 usb_speed = (event == 0 ? USB_SPEED_HIGH : USB_SPEED_SUPER);
3950 if (dwc->maximum_speed == usb_speed)
3951 goto err;
3952
Mayank Rana8a5cba82017-10-27 15:12:54 -07003953 dbg_event(0xFF, "fw_restarthost", 0);
3954 flush_delayed_work(&mdwc->sm_work);
Mayank Rana54d60432017-07-18 12:10:04 -07003955 dbg_event(0xFF, "stop_host_mode", dwc->maximum_speed);
3956 ret = dwc3_otg_start_host(mdwc, 0);
3957 if (ret)
3958 goto err;
3959
Vijayavardhan Vennapusa2ba9b802017-12-08 10:46:44 +05303960 dbg_event(0xFF, "USB_lpm_state", atomic_read(&dwc->in_lpm));
Mayank Rana54d60432017-07-18 12:10:04 -07003961 /*
3962	 * Stopping host mode performs an autosuspend of the mdwc
3963	 * device, and it may take some time for PM runtime suspend to run.
3964 * Hence call pm_runtime_suspend() API to invoke PM runtime
3965 * suspend immediately to put USB controller and PHYs into suspend.
3966 */
3967 ret = pm_runtime_suspend(mdwc->dev);
Vijayavardhan Vennapusa2ba9b802017-12-08 10:46:44 +05303968 /*
3969 * If mdwc device is already suspended, pm_runtime_suspend() API
3970	 * returns 1, which is not an error. Overwrite it with zero in that case.
3971 */
3972 if (ret > 0)
3973 ret = 0;
Mayank Rana54d60432017-07-18 12:10:04 -07003974 dbg_event(0xFF, "pm_runtime_sus", ret);
3975
3976 dwc->maximum_speed = usb_speed;
3977 mdwc->otg_state = OTG_STATE_B_IDLE;
3978 schedule_delayed_work(&mdwc->sm_work, 0);
3979 dbg_event(0xFF, "complete_host_change", dwc->maximum_speed);
3980err:
3981 return ret;
3982}
3983
Hemant Kumar006fae42017-07-12 18:11:25 -07003984static int get_psy_type(struct dwc3_msm *mdwc)
Mayank Rana511f3b22016-08-02 12:00:11 -07003985{
Jack Pham8caff352016-08-19 16:33:55 -07003986 union power_supply_propval pval = {0};
Mayank Rana511f3b22016-08-02 12:00:11 -07003987
3988 if (mdwc->charging_disabled)
Hemant Kumar006fae42017-07-12 18:11:25 -07003989 return -EINVAL;
Mayank Rana511f3b22016-08-02 12:00:11 -07003990
3991 if (!mdwc->usb_psy) {
3992 mdwc->usb_psy = power_supply_get_by_name("usb");
3993 if (!mdwc->usb_psy) {
Hemant Kumar006fae42017-07-12 18:11:25 -07003994 dev_err(mdwc->dev, "Could not get usb psy\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07003995 return -ENODEV;
3996 }
3997 }
3998
Hemant Kumar006fae42017-07-12 18:11:25 -07003999 power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_REAL_TYPE,
4000 &pval);
4001
4002 return pval.intval;
4003}
4004
4005static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
4006{
4007 union power_supply_propval pval = {0};
4008 int ret, psy_type;
4009
Hemant Kumar006fae42017-07-12 18:11:25 -07004010 psy_type = get_psy_type(mdwc);
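	/* Floating charger: report -ETIMEDOUT when no current is drawn, else the limit in uA */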
Vijayavardhan Vennapusac7f9b0f2017-10-03 14:44:52 +05304011 if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
Sai Krishna Juturif236f3e2017-12-08 15:00:43 +05304012 if (!mA)
4013 pval.intval = -ETIMEDOUT;
4014 else
4015 pval.intval = 1000 * mA;
Vijayavardhan Vennapusac7f9b0f2017-10-03 14:44:52 +05304016 goto set_prop;
Hemant Kumard6bae052017-07-27 15:11:25 -07004017 }
Jack Pham8caff352016-08-19 16:33:55 -07004018
Vijayavardhan Vennapusac7f9b0f2017-10-03 14:44:52 +05304019 if (mdwc->max_power == mA || psy_type != POWER_SUPPLY_TYPE_USB)
4020 return 0;
4021
4022 dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
4023 /* Set max current limit in uA */
4024 pval.intval = 1000 * mA;
4025
4026set_prop:
Jack Phamd72bafe2016-08-09 11:07:22 -07004027 ret = power_supply_set_property(mdwc->usb_psy,
Nicholas Troast7f55c922017-07-25 13:18:03 -07004028 POWER_SUPPLY_PROP_SDP_CURRENT_MAX, &pval);
Jack Phamd72bafe2016-08-09 11:07:22 -07004029 if (ret) {
4030 dev_dbg(mdwc->dev, "power supply error when setting property\n");
4031 return ret;
4032 }
Mayank Rana511f3b22016-08-02 12:00:11 -07004033
4034 mdwc->max_power = mA;
4035 return 0;
Mayank Rana511f3b22016-08-02 12:00:11 -07004036}
4037
4038
4039/**
4040 * dwc3_otg_sm_work - workqueue function.
4041 *
4042 * @w: Pointer to the dwc3 otg workqueue
4043 *
4044 * NOTE: After any change in otg_state, we must reschedule the state machine.
4045 */
4046static void dwc3_otg_sm_work(struct work_struct *w)
4047{
4048 struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
4049 struct dwc3 *dwc = NULL;
4050 bool work = 0;
4051 int ret = 0;
4052 unsigned long delay = 0;
4053 const char *state;
4054
4055 if (mdwc->dwc3)
4056 dwc = platform_get_drvdata(mdwc->dwc3);
4057
4058 if (!dwc) {
4059 dev_err(mdwc->dev, "dwc is NULL.\n");
4060 return;
4061 }
4062
4063 state = usb_otg_state_string(mdwc->otg_state);
4064 dev_dbg(mdwc->dev, "%s state\n", state);
Mayank Rana08e41922017-03-02 15:25:48 -08004065 dbg_event(0xFF, state, 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004066
4067 /* Check OTG state */
4068 switch (mdwc->otg_state) {
4069 case OTG_STATE_UNDEFINED:
Hemant Kumar8220a982017-01-19 18:11:34 -08004070 /* put controller and phy in suspend if no cable connected */
Mayank Rana511f3b22016-08-02 12:00:11 -07004071 if (test_bit(ID, &mdwc->inputs) &&
Hemant Kumar8220a982017-01-19 18:11:34 -08004072 !test_bit(B_SESS_VLD, &mdwc->inputs)) {
4073 dbg_event(0xFF, "undef_id_!bsv", 0);
4074 pm_runtime_set_active(mdwc->dev);
4075 pm_runtime_enable(mdwc->dev);
4076 pm_runtime_get_noresume(mdwc->dev);
4077 dwc3_msm_resume(mdwc);
4078 pm_runtime_put_sync(mdwc->dev);
4079 dbg_event(0xFF, "Undef NoUSB",
4080 atomic_read(&mdwc->dev->power.usage_count));
4081 mdwc->otg_state = OTG_STATE_B_IDLE;
Mayank Rana511f3b22016-08-02 12:00:11 -07004082 break;
Hemant Kumar8220a982017-01-19 18:11:34 -08004083 }
Mayank Rana511f3b22016-08-02 12:00:11 -07004084
Mayank Rana08e41922017-03-02 15:25:48 -08004085 dbg_event(0xFF, "Exit UNDEF", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004086 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar8220a982017-01-19 18:11:34 -08004087 pm_runtime_set_suspended(mdwc->dev);
4088 pm_runtime_enable(mdwc->dev);
Mayank Rana511f3b22016-08-02 12:00:11 -07004089 /* fall-through */
4090 case OTG_STATE_B_IDLE:
4091 if (!test_bit(ID, &mdwc->inputs)) {
4092 dev_dbg(mdwc->dev, "!id\n");
4093 mdwc->otg_state = OTG_STATE_A_IDLE;
4094 work = 1;
4095 } else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
4096 dev_dbg(mdwc->dev, "b_sess_vld\n");
Hemant Kumar006fae42017-07-12 18:11:25 -07004097 if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_FLOAT)
4098 queue_delayed_work(mdwc->dwc3_wq,
4099 &mdwc->sdp_check,
4100 msecs_to_jiffies(SDP_CONNETION_CHECK_TIME));
Mayank Rana511f3b22016-08-02 12:00:11 -07004101 /*
4102 * Increment pm usage count upon cable connect. Count
4103 * is decremented in OTG_STATE_B_PERIPHERAL state on
4104 * cable disconnect or in bus suspend.
4105 */
4106 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004107 dbg_event(0xFF, "BIDLE gsync",
4108 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004109 dwc3_otg_start_peripheral(mdwc, 1);
4110 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4111 work = 1;
4112 } else {
4113 dwc3_msm_gadget_vbus_draw(mdwc, 0);
4114 dev_dbg(mdwc->dev, "Cable disconnected\n");
4115 }
4116 break;
4117
4118 case OTG_STATE_B_PERIPHERAL:
4119 if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
4120 !test_bit(ID, &mdwc->inputs)) {
4121 dev_dbg(mdwc->dev, "!id || !bsv\n");
4122 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar006fae42017-07-12 18:11:25 -07004123 cancel_delayed_work_sync(&mdwc->sdp_check);
Mayank Rana511f3b22016-08-02 12:00:11 -07004124 dwc3_otg_start_peripheral(mdwc, 0);
4125 /*
4126 * Decrement pm usage count upon cable disconnect
4127 * which was incremented upon cable connect in
4128 * OTG_STATE_B_IDLE state
4129 */
Mayank Ranace7ff8b62017-11-09 17:25:55 -08004130 pm_runtime_put_sync_suspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004131 dbg_event(0xFF, "!BSV psync",
4132 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004133 work = 1;
4134 } else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
4135 test_bit(B_SESS_VLD, &mdwc->inputs)) {
4136 dev_dbg(mdwc->dev, "BPER bsv && susp\n");
4137 mdwc->otg_state = OTG_STATE_B_SUSPEND;
4138 /*
4139 * Decrement pm usage count upon bus suspend.
4140 * Count was incremented either upon cable
4141 * connect in OTG_STATE_B_IDLE or host
4142 * initiated resume after bus suspend in
4143 * OTG_STATE_B_SUSPEND state
4144 */
4145 pm_runtime_mark_last_busy(mdwc->dev);
4146 pm_runtime_put_autosuspend(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004147 dbg_event(0xFF, "SUSP put",
4148 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004149 }
4150 break;
4151
4152 case OTG_STATE_B_SUSPEND:
4153 if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
4154 dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
4155 mdwc->otg_state = OTG_STATE_B_IDLE;
Hemant Kumar006fae42017-07-12 18:11:25 -07004156 cancel_delayed_work_sync(&mdwc->sdp_check);
Mayank Rana511f3b22016-08-02 12:00:11 -07004157 dwc3_otg_start_peripheral(mdwc, 0);
4158 } else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
4159 dev_dbg(mdwc->dev, "BSUSP !susp\n");
4160 mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
4161 /*
4162 * Increment pm usage count upon host
4163 * initiated resume. Count was decremented
4164 * upon bus suspend in
4165 * OTG_STATE_B_PERIPHERAL state.
4166 */
4167 pm_runtime_get_sync(mdwc->dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004168 dbg_event(0xFF, "!SUSP gsync",
4169 atomic_read(&mdwc->dev->power.usage_count));
Mayank Rana511f3b22016-08-02 12:00:11 -07004170 }
4171 break;
4172
4173 case OTG_STATE_A_IDLE:
4174 /* Switch to A-Device*/
4175 if (test_bit(ID, &mdwc->inputs)) {
4176 dev_dbg(mdwc->dev, "id\n");
4177 mdwc->otg_state = OTG_STATE_B_IDLE;
4178 mdwc->vbus_retry_count = 0;
4179 work = 1;
4180 } else {
4181 mdwc->otg_state = OTG_STATE_A_HOST;
4182 ret = dwc3_otg_start_host(mdwc, 1);
4183 if ((ret == -EPROBE_DEFER) &&
4184 mdwc->vbus_retry_count < 3) {
4185 /*
4186				 * Regulator get failed because the regulator driver
4187				 * is not up yet. Retry starting the host after 1 sec.
4188 */
4189 mdwc->otg_state = OTG_STATE_A_IDLE;
4190 dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
4191 delay = VBUS_REG_CHECK_DELAY;
4192 work = 1;
4193 mdwc->vbus_retry_count++;
4194 } else if (ret) {
4195 dev_err(mdwc->dev, "unable to start host\n");
4196 mdwc->otg_state = OTG_STATE_A_IDLE;
4197 goto ret;
4198 }
4199 }
4200 break;
4201
4202 case OTG_STATE_A_HOST:
Manu Gautam976fdfc2016-08-18 09:27:35 +05304203 if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
4204 dev_dbg(mdwc->dev, "id || hc_died\n");
Mayank Rana511f3b22016-08-02 12:00:11 -07004205 dwc3_otg_start_host(mdwc, 0);
4206 mdwc->otg_state = OTG_STATE_B_IDLE;
4207 mdwc->vbus_retry_count = 0;
Manu Gautam976fdfc2016-08-18 09:27:35 +05304208 mdwc->hc_died = false;
Mayank Rana511f3b22016-08-02 12:00:11 -07004209 work = 1;
4210 } else {
4211 dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004212 dbg_event(0xFF, "XHCIResume", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004213 if (dwc)
4214 pm_runtime_resume(&dwc->xhci->dev);
4215 }
4216 break;
4217
4218 default:
4219 dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
4220
4221 }
4222
4223 if (work)
4224 schedule_delayed_work(&mdwc->sm_work, delay);
4225
4226ret:
4227 return;
4228}
4229
4230#ifdef CONFIG_PM_SLEEP
4231static int dwc3_msm_pm_suspend(struct device *dev)
4232{
4233 int ret = 0;
4234 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4235 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4236
4237 dev_dbg(dev, "dwc3-msm PM suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004238 dbg_event(0xFF, "PM Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004239
4240 flush_workqueue(mdwc->dwc3_wq);
4241 if (!atomic_read(&dwc->in_lpm)) {
4242 dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
4243 return -EBUSY;
4244 }
4245
4246 ret = dwc3_msm_suspend(mdwc);
4247 if (!ret)
4248 atomic_set(&mdwc->pm_suspended, 1);
4249
4250 return ret;
4251}
4252
4253static int dwc3_msm_pm_resume(struct device *dev)
4254{
4255 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004256 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004257
4258 dev_dbg(dev, "dwc3-msm PM resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004259 dbg_event(0xFF, "PM Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004260
Mayank Rana511f3b22016-08-02 12:00:11 -07004261 /* flush to avoid race in read/write of pm_suspended */
4262 flush_workqueue(mdwc->dwc3_wq);
4263 atomic_set(&mdwc->pm_suspended, 0);
4264
4265 /* kick in otg state machine */
4266 queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
4267
4268 return 0;
4269}
4270#endif
4271
4272#ifdef CONFIG_PM
4273static int dwc3_msm_runtime_idle(struct device *dev)
4274{
Mayank Rana08e41922017-03-02 15:25:48 -08004275 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
4276 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
4277
Mayank Rana511f3b22016-08-02 12:00:11 -07004278 dev_dbg(dev, "DWC3-msm runtime idle\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004279 dbg_event(0xFF, "RT Idle", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004280
4281 return 0;
4282}
4283
4284static int dwc3_msm_runtime_suspend(struct device *dev)
4285{
4286 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004287 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004288
4289 dev_dbg(dev, "DWC3-msm runtime suspend\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004290 dbg_event(0xFF, "RT Sus", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004291
4292 return dwc3_msm_suspend(mdwc);
4293}
4294
4295static int dwc3_msm_runtime_resume(struct device *dev)
4296{
4297 struct dwc3_msm *mdwc = dev_get_drvdata(dev);
Mayank Rana08e41922017-03-02 15:25:48 -08004298 struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
Mayank Rana511f3b22016-08-02 12:00:11 -07004299
4300 dev_dbg(dev, "DWC3-msm runtime resume\n");
Mayank Rana08e41922017-03-02 15:25:48 -08004301 dbg_event(0xFF, "RT Res", 0);
Mayank Rana511f3b22016-08-02 12:00:11 -07004302
4303 return dwc3_msm_resume(mdwc);
4304}
4305#endif
4306
4307static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
4308 SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
4309 SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
4310 dwc3_msm_runtime_idle)
4311};
4312
4313static const struct of_device_id of_dwc3_match[] = {
4314 {
4315 .compatible = "qcom,dwc-usb3-msm",
4316 },
4317 { },
4318};
4319MODULE_DEVICE_TABLE(of, of_dwc3_match);
4320
4321static struct platform_driver dwc3_msm_driver = {
4322 .probe = dwc3_msm_probe,
4323 .remove = dwc3_msm_remove,
4324 .driver = {
4325 .name = "msm-dwc3",
4326 .pm = &dwc3_msm_dev_pm_ops,
4327		.of_match_table = of_dwc3_match,
4328 },
4329};
4330
4331MODULE_LICENSE("GPL v2");
4332MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
4333
4334static int dwc3_msm_init(void)
4335{
4336 return platform_driver_register(&dwc3_msm_driver);
4337}
4338module_init(dwc3_msm_init);
4339
4340static void __exit dwc3_msm_exit(void)
4341{
4342 platform_driver_unregister(&dwc3_msm_driver);
4343}
4344module_exit(dwc3_msm_exit);